From 92948d5f2a5dc614e2f0a98f6c80c7deb3870dc0 Mon Sep 17 00:00:00 2001 From: duanhongyi Date: Sat, 14 Sep 2024 13:49:05 +0800 Subject: [PATCH 01/93] chore(woodpecker): add tag event --- .woodpecker/build.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.woodpecker/build.yml b/.woodpecker/build.yml index d6ab4eaf..200d6837 100644 --- a/.woodpecker/build.yml +++ b/.woodpecker/build.yml @@ -10,21 +10,22 @@ steps: when: event: - pull_request + - tag - push - name: publish image: bash commands: - export BOT_GITHUB_TOKEN=$GITHUB_TOKEN + - export PULL_BASE_REF=$([ -z $CI_COMMIT_TAG ] && echo latest || echo $CI_COMMIT_TAG) - make prepare-assets - - make latest-release + - make $([ -z $CI_COMMIT_TAG ] && echo latest-release || echo push-release) environment: - GIT_REPO: ${CI_REPO} - GIT_TAG: ${CI_COMMIT_TAG=latest} REPO_NAME: ${CI_REPO_NAME} REPO_OWNER: ${CI_REPO_OWNER} secrets: - github_token when: event: + - tag - push From 8e1e578bfdd20b4b6dd3baad47aed5f388d6ceab Mon Sep 17 00:00:00 2001 From: duanhongyi Date: Wed, 18 Sep 2024 09:29:31 +0800 Subject: [PATCH 02/93] chore(woodpecker): add env export --- .woodpecker/build.yml | 16 +++++++++------- scripts/remove_latest_release.sh | 2 +- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/.woodpecker/build.yml b/.woodpecker/build.yml index 200d6837..138adcb5 100644 --- a/.woodpecker/build.yml +++ b/.woodpecker/build.yml @@ -8,16 +8,18 @@ steps: commands: - make check when: - event: - - pull_request - - tag - - push + - event: tag + ref: refs/tags/v* + - event: push + - event: pull_request - name: publish image: bash commands: - export BOT_GITHUB_TOKEN=$GITHUB_TOKEN - export PULL_BASE_REF=$([ -z $CI_COMMIT_TAG ] && echo latest || echo $CI_COMMIT_TAG) + - export GIT_REPO=${CI_REPO} + - export GIT_TAG=${PULL_BASE_REF} - make prepare-assets - make $([ -z $CI_COMMIT_TAG ] && echo latest-release || echo push-release) environment: @@ -26,6 +28,6 @@ steps: secrets: - github_token when: - event: 
- - tag - - push + - event: tag + ref: refs/tags/v* + - event: push diff --git a/scripts/remove_latest_release.sh b/scripts/remove_latest_release.sh index 648a8a0d..3665491e 100755 --- a/scripts/remove_latest_release.sh +++ b/scripts/remove_latest_release.sh @@ -4,7 +4,7 @@ set -e GIT_REPO=$1 echo "Getting latest release from $GIT_REPO" -RESP=$(curl -s -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/repos/$GIT_REPO/releases/latest") +RESP=$(curl -s -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/repos/$GIT_REPO/releases/tags/latest") URLS=$(echo "$RESP" | jq -r .url) if [ "${URLS}" ] && [ -z "${URLS}" ]; then echo ${RESP} From 812c284a3e5d92707ad348e3cd69840fea104d94 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 20 Sep 2024 14:08:54 +0800 Subject: [PATCH 03/93] chore(addons): change volumePermissions default value --- addons/kvrocks/2.8/chart/kvrocks/values.yaml | 2 +- addons/minio/2023/chart/minio/values.yaml | 2 +- addons/opensearch/2.10/chart/opensearch/values.yaml | 2 +- addons/rabbitmq/3.12/chart/rabbitmq/values.yaml | 2 +- addons/redis/7.0/chart/redis/values.yaml | 2 +- addons/zookeeper/3.9/chart/zookeeper/values.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/addons/kvrocks/2.8/chart/kvrocks/values.yaml b/addons/kvrocks/2.8/chart/kvrocks/values.yaml index d575ed5c..de27b7e8 100644 --- a/addons/kvrocks/2.8/chart/kvrocks/values.yaml +++ b/addons/kvrocks/2.8/chart/kvrocks/values.yaml @@ -1571,7 +1571,7 @@ metrics: volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` ## - enabled: true + enabled: false ## Bitnami Shell image ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ ## @param volumePermissions.image.registry Bitnami Shell image registry diff --git a/addons/minio/2023/chart/minio/values.yaml b/addons/minio/2023/chart/minio/values.yaml index 4d3d1812..c925090d 100644 --- 
a/addons/minio/2023/chart/minio/values.yaml +++ b/addons/minio/2023/chart/minio/values.yaml @@ -904,7 +904,7 @@ persistentVolumeClaimRetentionPolicy: volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` ## - enabled: true + enabled: false ## @param volumePermissions.image.registry Init container volume-permissions image registry ## @param volumePermissions.image.repository Init container volume-permissions image repository ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) diff --git a/addons/opensearch/2.10/chart/opensearch/values.yaml b/addons/opensearch/2.10/chart/opensearch/values.yaml index 3c3597ea..eccb0e66 100644 --- a/addons/opensearch/2.10/chart/opensearch/values.yaml +++ b/addons/opensearch/2.10/chart/opensearch/values.yaml @@ -2183,7 +2183,7 @@ ingest: volumePermissions: ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) ## - enabled: true + enabled: false ## @param volumePermissions.image.registry Init container volume-permissions image registry ## @param volumePermissions.image.repository Init container volume-permissions image name ## @param volumePermissions.image.tag Init container volume-permissions image tag diff --git a/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml b/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml index 3914f80a..07c2b123 100644 --- a/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml +++ b/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml @@ -1265,7 +1265,7 @@ metrics: volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` ## - enabled: true + enabled: false ## @param volumePermissions.image.registry 
Init container volume-permissions image registry ## @param volumePermissions.image.repository Init container volume-permissions image repository ## @param volumePermissions.image.tag Init container volume-permissions image tag diff --git a/addons/redis/7.0/chart/redis/values.yaml b/addons/redis/7.0/chart/redis/values.yaml index 1fa1389d..48049bf1 100644 --- a/addons/redis/7.0/chart/redis/values.yaml +++ b/addons/redis/7.0/chart/redis/values.yaml @@ -1621,7 +1621,7 @@ metrics: volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` ## - enabled: true + enabled: false ## Bitnami Shell image ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ ## @param volumePermissions.image.registry Bitnami Shell image registry diff --git a/addons/zookeeper/3.9/chart/zookeeper/values.yaml b/addons/zookeeper/3.9/chart/zookeeper/values.yaml index f9a86f96..e280599b 100644 --- a/addons/zookeeper/3.9/chart/zookeeper/values.yaml +++ b/addons/zookeeper/3.9/chart/zookeeper/values.yaml @@ -655,7 +655,7 @@ persistentVolumeClaimRetentionPolicy: volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume ## - enabled: true + enabled: false ## @param volumePermissions.image.registry Init container volume-permissions image registry ## @param volumePermissions.image.repository Init container volume-permissions image repository ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) From 44d0ddc88ce42700ba3ebe87e1c5564d8719943a Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 10 Oct 2024 13:19:11 +0800 Subject: [PATCH 04/93] chore(minio): change minio plan mem limit --- addons/minio/2023/plans/standard-v8d4s3T/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/addons/minio/2023/plans/standard-v8d4s3T/values.yaml 
b/addons/minio/2023/plans/standard-v8d4s3T/values.yaml index ff1d51f7..82bde774 100644 --- a/addons/minio/2023/plans/standard-v8d4s3T/values.yaml +++ b/addons/minio/2023/plans/standard-v8d4s3T/values.yaml @@ -14,10 +14,10 @@ fullnameOverride: hb-minio-standard-v8d4s3T resources: limits: cpu: 8 - memory: 32Gi + memory: 64Gi requests: cpu: 4 - memory: 16Gi + memory: 32Gi ## MinIO® statefulset parameters ## Only when mode is 'distributed' From d240f9e0d5cd69ea47e84761d947e6a8c172bebd Mon Sep 17 00:00:00 2001 From: lijianguo Date: Sat, 12 Oct 2024 19:10:26 +0800 Subject: [PATCH 05/93] chore(redis-cluster): quote ioThread.doReads --- .../7.0/chart/redis-cluster/templates/redis-statefulset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml index 473fd701..59146af0 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml @@ -146,7 +146,7 @@ spec: value: "hostname" {{- if .Values.ioThread.enabled }} - name: REDIS_IO_THREADS_DO_READS - value: {{ .Values.ioThread.doReads }} + value: {{ .Values.ioThread.doReads | quote }} - name: REDIS_IO_THREADS value: {{ .Values.ioThread.counts | quote }} {{- end }} From f6c0e1df94a939481eea0c5348f8b611e035180b Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 14 Oct 2024 13:59:18 +0800 Subject: [PATCH 06/93] chore(addons): scale sts retain pvc --- addons/airflow/2/chart/airflow/values.yaml | 2 +- addons/clickhouse/24/chart/clickhouse/values.yaml | 2 +- addons/flink/1.17/chart/flink/values.yaml | 2 +- addons/kafka/3.6/chart/kafka/values.yaml | 4 ++-- addons/kvrocks/2.8/chart/kvrocks/values.yaml | 6 +++--- addons/minio/2023/chart/minio/values.yaml | 2 +- .../7.0/chart/mongodb/templates/replicaset/statefulset.yaml | 2 +- 
addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml | 2 +- addons/opensearch/2.10/chart/opensearch/values.yaml | 4 ++-- .../15/chart/postgresql-cluster/values.yaml | 2 +- addons/rabbitmq/3.12/chart/rabbitmq/values.yaml | 2 +- addons/redis-cluster/7.0/chart/redis-cluster/values.yaml | 2 +- addons/redis/7.0/chart/redis/values.yaml | 6 +++--- addons/seaweedfs/3/chart/seaweedfs/values.yaml | 6 +++--- addons/spark/3.4/chart/spark/values.yaml | 4 ++-- addons/zookeeper/3.9/chart/zookeeper/values.yaml | 2 +- 16 files changed, 25 insertions(+), 25 deletions(-) diff --git a/addons/airflow/2/chart/airflow/values.yaml b/addons/airflow/2/chart/airflow/values.yaml index e8a3d055..6383be96 100644 --- a/addons/airflow/2/chart/airflow/values.yaml +++ b/addons/airflow/2/chart/airflow/values.yaml @@ -985,7 +985,7 @@ worker: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## @section Airflow git sync parameters diff --git a/addons/clickhouse/24/chart/clickhouse/values.yaml b/addons/clickhouse/24/chart/clickhouse/values.yaml index d14ffc82..76726030 100644 --- a/addons/clickhouse/24/chart/clickhouse/values.yaml +++ b/addons/clickhouse/24/chart/clickhouse/values.yaml @@ -983,7 +983,7 @@ persistence: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## 'volumePermissions' init container parameters diff --git a/addons/flink/1.17/chart/flink/values.yaml b/addons/flink/1.17/chart/flink/values.yaml index 01d5404b..78735590 100644 --- a/addons/flink/1.17/chart/flink/values.yaml +++ b/addons/flink/1.17/chart/flink/values.yaml @@ -895,5 +895,5 @@ taskmanager: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete diff --git a/addons/kafka/3.6/chart/kafka/values.yaml 
b/addons/kafka/3.6/chart/kafka/values.yaml index d6e4e2b2..d2fb2ff7 100644 --- a/addons/kafka/3.6/chart/kafka/values.yaml +++ b/addons/kafka/3.6/chart/kafka/values.yaml @@ -835,7 +835,7 @@ controller: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Log Persistence parameters ## @@ -1244,7 +1244,7 @@ broker: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Log Persistence parameters ## diff --git a/addons/kvrocks/2.8/chart/kvrocks/values.yaml b/addons/kvrocks/2.8/chart/kvrocks/values.yaml index de27b7e8..cce986d4 100644 --- a/addons/kvrocks/2.8/chart/kvrocks/values.yaml +++ b/addons/kvrocks/2.8/chart/kvrocks/values.yaml @@ -440,7 +440,7 @@ master: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Rkvrocksedis; master service parameters ## @@ -803,7 +803,7 @@ replica: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## kvrocks; replicas service parameters ## @@ -1061,7 +1061,7 @@ sentinel: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## kvrocks; Sentinel resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ diff --git a/addons/minio/2023/chart/minio/values.yaml b/addons/minio/2023/chart/minio/values.yaml index c925090d..e40fa496 100644 --- a/addons/minio/2023/chart/minio/values.yaml +++ b/addons/minio/2023/chart/minio/values.yaml @@ -894,7 +894,7 @@ persistence: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume 
retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## @section Volume Permissions parameters diff --git a/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml b/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml index 6a385860..c45daffe 100644 --- a/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml +++ b/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml @@ -29,7 +29,7 @@ spec: podManagementPolicy: {{ .Values.podManagementPolicy }} persistentVolumeClaimRetentionPolicy: whenDeleted: Delete - whenScaled: Delete + whenScaled: Retain replicas: {{ .Values.replicaCount }} {{- if .Values.updateStrategy }} updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml index 876e51cb..01eb06e9 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml @@ -465,7 +465,7 @@ primary: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete persistence: ## @param primary.persistence.enabled Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. 
If false, use emptyDir diff --git a/addons/opensearch/2.10/chart/opensearch/values.yaml b/addons/opensearch/2.10/chart/opensearch/values.yaml index eccb0e66..1b467a2b 100644 --- a/addons/opensearch/2.10/chart/opensearch/values.yaml +++ b/addons/opensearch/2.10/chart/opensearch/values.yaml @@ -774,7 +774,7 @@ master: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Pods Service Account ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ @@ -1180,7 +1180,7 @@ data: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Pods Service Account ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ diff --git a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml index 13582f6c..1bed7475 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml @@ -344,7 +344,7 @@ logicalBackup: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete persistentVolume: enabled: true diff --git a/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml b/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml index 07c2b123..5446ab0f 100644 --- a/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml +++ b/addons/rabbitmq/3.12/chart/rabbitmq/values.yaml @@ -844,7 +844,7 @@ persistentVolumeClaimRetentionPolicy: enabled: true ## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced ## - whenScaled: Delete + whenScaled: Retain ## @param 
persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted ## whenDeleted: Delete diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml index 4add89af..bc946d3d 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml @@ -336,7 +336,7 @@ persistence: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Init containers parameters: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup diff --git a/addons/redis/7.0/chart/redis/values.yaml b/addons/redis/7.0/chart/redis/values.yaml index 48049bf1..99759d05 100644 --- a/addons/redis/7.0/chart/redis/values.yaml +++ b/addons/redis/7.0/chart/redis/values.yaml @@ -453,7 +453,7 @@ master: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Redis™ master service parameters ## @@ -823,7 +823,7 @@ replica: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Redis™ replicas service parameters ## @@ -1081,7 +1081,7 @@ sentinel: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## Redis™ Sentinel resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ diff --git a/addons/seaweedfs/3/chart/seaweedfs/values.yaml b/addons/seaweedfs/3/chart/seaweedfs/values.yaml index 582d5eb4..f5d0bd8b 100644 --- a/addons/seaweedfs/3/chart/seaweedfs/values.yaml +++ b/addons/seaweedfs/3/chart/seaweedfs/values.yaml @@ -205,7 +205,7 @@ master: ## @param 
persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete filer: @@ -294,7 +294,7 @@ filer: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete volume: @@ -385,7 +385,7 @@ volume: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete cronjob: diff --git a/addons/spark/3.4/chart/spark/values.yaml b/addons/spark/3.4/chart/spark/values.yaml index 415e3cbe..73bd39a8 100644 --- a/addons/spark/3.4/chart/spark/values.yaml +++ b/addons/spark/3.4/chart/spark/values.yaml @@ -430,7 +430,7 @@ master: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## @section Spark worker parameters ## @@ -775,7 +775,7 @@ worker: ## persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## @section Security parameters diff --git a/addons/zookeeper/3.9/chart/zookeeper/values.yaml b/addons/zookeeper/3.9/chart/zookeeper/values.yaml index e280599b..c2c22970 100644 --- a/addons/zookeeper/3.9/chart/zookeeper/values.yaml +++ b/addons/zookeeper/3.9/chart/zookeeper/values.yaml @@ -644,7 +644,7 @@ persistence: ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted persistentVolumeClaimRetentionPolicy: enabled: true - whenScaled: Delete + whenScaled: Retain whenDeleted: Delete ## @section Volume Permissions parameters ## From 
0a047b429714f59b20f227c7fa1594ee0c044d1e Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 14 Oct 2024 18:17:01 +0800 Subject: [PATCH 07/93] fix(mysql-cluster): router request limits (#82) --- .../8.0/plans/standard-16c64g400/values.yaml | 18 +++++++++--------- .../8.0/plans/standard-32c128g800/values.yaml | 17 ++++++++--------- .../8.0/plans/standard-4c16g100/values.yaml | 18 +++++++++--------- .../8.0/plans/standard-8c32g200/values.yaml | 18 +++++++++--------- 4 files changed, 35 insertions(+), 36 deletions(-) diff --git a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml index 78f05cfe..f346d776 100644 --- a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml @@ -21,15 +21,6 @@ primary: cpu: 8000m memory: 32Gi -router: - resources: - limits: - cpu: 1600m - memory: 2Gi - requests: - cpu: 1600m - memory: 2Gi - ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims @@ -67,3 +58,12 @@ router: ## app: my-app ## selector: {} + +router: + resources: + limits: + cpu: 1600m + memory: 2Gi + requests: + cpu: 1600m + memory: 2Gi \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml index ead2abb1..c25affff 100644 --- a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml @@ -21,15 +21,6 @@ primary: cpu: 16000m memory: 64Gi -router: - resources: - limits: - cpu: 3200m - memory: 4Gi - requests: - cpu: 3200m - memory: 4Gi - ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims @@ -67,3 +58,11 @@ router: ## app: my-app ## selector: {} +router: + resources: + limits: + cpu: 3200m + memory: 4Gi + requests: + cpu: 3200m + memory: 4Gi \ No newline at end of file diff --git 
a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml index da0cbee1..68412b31 100644 --- a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml @@ -21,15 +21,6 @@ primary: cpu: 4000m memory: 16Gi -router: - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims @@ -67,3 +58,12 @@ router: ## app: my-app ## selector: {} + +router: + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml index 2b3d28e7..4061331e 100644 --- a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml @@ -20,15 +20,6 @@ primary: requests: cpu: 4000m memory: 16Gi - -router: - resources: - limits: - cpu: 800m - memory: 1024Mi - requests: - cpu: 800m - memory: 1024Mi ## @section Persistence parameters @@ -67,3 +58,12 @@ router: ## app: my-app ## selector: {} + +router: + resources: + limits: + cpu: 800m + memory: 1024Mi + requests: + cpu: 800m + memory: 1024Mi \ No newline at end of file From c1b0a7a0af22954e3155d270cafbb700682664a6 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 15 Oct 2024 18:02:43 +0800 Subject: [PATCH 08/93] chore(redis-cluster): meta useAOFPersistence --- addons/redis-cluster/7.0/meta.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/addons/redis-cluster/7.0/meta.yaml b/addons/redis-cluster/7.0/meta.yaml index 5cd4b2fd..8fd165b0 100644 --- a/addons/redis-cluster/7.0/meta.yaml +++ b/addons/redis-cluster/7.0/meta.yaml @@ -18,6 +18,9 @@ allow_parameters: - name: "password" required: false description: "password config for 
values.yaml" +- name: "redis.useAOFPersistence" + required: false + description: "redis.useAOFPersistence config for values.yaml" - name: "networkPolicy.allowNamespaces" required: false description: "networkPolicy allowNamespaces config for values.yaml" From 38b6cc5f08d44da3477cdde1a7ba33b359bad3f4 Mon Sep 17 00:00:00 2001 From: Eamon Date: Wed, 16 Oct 2024 16:54:35 +0800 Subject: [PATCH 09/93] fix(mysql-cluster): remove metrics probe --- addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml index 01eb06e9..f0eb3a45 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml @@ -1112,6 +1112,9 @@ metrics: - --collect.perf_schema.replication_group_members - --collect.perf_schema.replication_group_member_stats - --collect.perf_schema.replication_applier_status_by_worker + - --collect.auto_increment.columns + - --collect.binlog_size + - --collect.engine_innodb_status secondary: [] ## Mysqld Prometheus exporter resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ @@ -1158,7 +1161,7 @@ metrics: ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe ## livenessProbe: - enabled: true + enabled: false initialDelaySeconds: 120 periodSeconds: 10 timeoutSeconds: 10 @@ -1174,7 +1177,7 @@ metrics: ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe ## readinessProbe: - enabled: true + enabled: false initialDelaySeconds: 30 periodSeconds: 10 timeoutSeconds: 10 From cf06d4d920c9c358ba3fd0fa0c1791e4b39f99ca Mon Sep 17 00:00:00 2001 From: lijianguo Date: Wed, 16 Oct 2024 10:45:33 +0800 Subject: [PATCH 10/93] chore(redis): add 128G plan --- .../redis/7.0/plans/standard-131072/bind.yaml | 108 ++++++++++++++ 
.../create-instance-schema.json | 12 ++ .../redis/7.0/plans/standard-131072/meta.yaml | 6 + .../7.0/plans/standard-131072/values.yaml | 138 ++++++++++++++++++ .../7.0/plans/standard-16384/values.yaml | 2 +- .../7.0/plans/standard-32768/values.yaml | 2 +- .../7.0/plans/standard-65536/values.yaml | 2 +- .../redis/7.0/plans/standard-8192/values.yaml | 2 +- 8 files changed, 268 insertions(+), 4 deletions(-) create mode 100644 addons/redis/7.0/plans/standard-131072/bind.yaml create mode 100644 addons/redis/7.0/plans/standard-131072/create-instance-schema.json create mode 100644 addons/redis/7.0/plans/standard-131072/meta.yaml create mode 100644 addons/redis/7.0/plans/standard-131072/values.yaml diff --git a/addons/redis/7.0/plans/standard-131072/bind.yaml b/addons/redis/7.0/plans/standard-131072/bind.yaml new file mode 100644 index 00000000..08936377 --- /dev/null +++ b/addons/redis/7.0/plans/standard-131072/bind.yaml @@ -0,0 +1,108 @@ +credential: + {{- if not .Values.sentinel.enabled }} + {{ if (eq .Values.master.service.type "LoadBalancer") }} + - name: EXTERNAL_MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: MASTER_DOMAIN + value: {{ printf "%s-master" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: MASTER_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-redis")].port }' + + - name: REDIS_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="tcp-redis")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} + {{ if (eq .Values.replica.service.type "LoadBalancer") }} + - name: EXTERNAL_REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: REPLICAS_DOMAIN + value: {{ printf "%s-replicas" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: REPLICAS_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-redis")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + {{ if (eq .Values.sentinel.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: SENTINEL_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-sentinel")].port }' + + - name: MASTER_NAME + value: {{ .Values.sentinel.masterSet }} + + {{- if.Values.proxy.enabled }} + - name: PROXY_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="tcp-proxy")].port }' + {{- end }} + {{- end }} + + {{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.redis-password }' + {{- end }} + + {{- if and .Values.auth.enabled .Values.auth.sentinel (not .Values.auth.existingSecret) }} + - name: REDIS_SENTINEL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.redis-password }' + {{- end }} diff --git a/addons/redis/7.0/plans/standard-131072/create-instance-schema.json b/addons/redis/7.0/plans/standard-131072/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/redis/7.0/plans/standard-131072/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-131072/meta.yaml b/addons/redis/7.0/plans/standard-131072/meta.yaml new file mode 100644 index 00000000..6d00e6e1 --- /dev/null +++ b/addons/redis/7.0/plans/standard-131072/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-131072" +id: e5b7900e-2311-4650-a580-1943591622a1 +description: "Redis standard-131072 plan which limit resources memory size 128Gi." 
+displayName: "standard-131072" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/redis/7.0/plans/standard-131072/values.yaml b/addons/redis/7.0/plans/standard-131072/values.yaml new file mode 100644 index 00000000..3c92b5f7 --- /dev/null +++ b/addons/redis/7.0/plans/standard-131072/values.yaml @@ -0,0 +1,138 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-redis-standard-131072 + +## @section Redis™ master configuration parameters +## + +master: + ## Redis™ master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis™ master containers + ## @param master.resources.requests The requested resources for the Redis™ master containers + ## + resources: + limits: + cpu: 4 + memory: 128Gi + requests: + cpu: 2 + memory: 64Gi + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis™ master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.path The path the volume will be mounted at on Redis™ master containers + ## NOTE: Useful when using different Redis™ images + ## + path: /drycc/redis/data + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 256Gi + +## @section Redis™ replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis™ replicas to 
deploy + ## + replicaCount: 3 + + ## Redis™ replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis™ replicas containers + ## @param replica.resources.requests The requested resources for the Redis™ replicas containers + ## + resources: + limits: + cpu: 4 + memory: 128Gi + requests: + cpu: 2 + memory: 64Gi + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Redis™ replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.path The path the volume will be mounted at on Redis™ replicas containers + ## NOTE: Useful when using different Redis™ images + ## + path: /drycc/redis/data + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 256Gi + +sentinel: + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Redis™ sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: true + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: 
"" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 8Gi + + ## Redis™ Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis™ Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Redis™ Sentinel containers + ## + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi + +proxy: + ## Redis™ Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the Redis™ Sentinel containers + ## @param proxy.resources.requests The requested resources for the Redis™ Sentinel containers + ## + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-16384/values.yaml b/addons/redis/7.0/plans/standard-16384/values.yaml index 6e0edd78..cf9c77cd 100644 --- a/addons/redis/7.0/plans/standard-16384/values.yaml +++ b/addons/redis/7.0/plans/standard-16384/values.yaml @@ -108,7 +108,7 @@ sentinel: - ReadWriteOnce ## @param sentinel.persistence.size Persistent Volume size ## - size: 32Gi + size: 8Gi ## Redis™ Sentinel resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ diff --git a/addons/redis/7.0/plans/standard-32768/values.yaml b/addons/redis/7.0/plans/standard-32768/values.yaml index c9e3d8ea..0b83c105 100644 --- a/addons/redis/7.0/plans/standard-32768/values.yaml +++ b/addons/redis/7.0/plans/standard-32768/values.yaml @@ -108,7 +108,7 @@ sentinel: - ReadWriteOnce ## @param sentinel.persistence.size Persistent Volume size ## - size: 64Gi + size: 8Gi ## Redis™ Sentinel resource requests and limits ## ref: 
https://kubernetes.io/docs/user-guide/compute-resources/ diff --git a/addons/redis/7.0/plans/standard-65536/values.yaml b/addons/redis/7.0/plans/standard-65536/values.yaml index 4405db98..b47da715 100644 --- a/addons/redis/7.0/plans/standard-65536/values.yaml +++ b/addons/redis/7.0/plans/standard-65536/values.yaml @@ -108,7 +108,7 @@ sentinel: - ReadWriteOnce ## @param sentinel.persistence.size Persistent Volume size ## - size: 128Gi + size: 8Gi ## Redis™ Sentinel resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ diff --git a/addons/redis/7.0/plans/standard-8192/values.yaml b/addons/redis/7.0/plans/standard-8192/values.yaml index 1ace60e4..8c4bc3cb 100644 --- a/addons/redis/7.0/plans/standard-8192/values.yaml +++ b/addons/redis/7.0/plans/standard-8192/values.yaml @@ -108,7 +108,7 @@ sentinel: - ReadWriteOnce ## @param sentinel.persistence.size Persistent Volume size ## - size: 16Gi + size: 8Gi ## Redis™ Sentinel resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ From bbd7fd20e806957672e559e17c545eb6006a70bd Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 17 Oct 2024 14:51:56 +0800 Subject: [PATCH 11/93] chore(redis): config disableCommands --- addons/redis/7.0/meta.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/addons/redis/7.0/meta.yaml b/addons/redis/7.0/meta.yaml index dfdded88..43f88697 100644 --- a/addons/redis/7.0/meta.yaml +++ b/addons/redis/7.0/meta.yaml @@ -27,12 +27,18 @@ allow_parameters: - name: "master.service.type" required: false description: "master service type config for values.yaml" +- name: "master.disableCommands" + required: false + description: "master disableCommands config for values.yaml" - name: "replica.service.type" required: false description: "replica service type config for values.yaml" - name: "sentinel.service.type" required: false description: "sentinel service type config for values.yaml" +- name: "replica.disableCommands" + 
required: false + description: "replica.disableCommands config for values.yaml" - name: "sentinel.enabled" required: false description: "sentinel enabled config for values.yaml" From efaa5ed783085a7e3c997f867d9fb1f4e888291a Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 21 Oct 2024 18:06:00 +0800 Subject: [PATCH 12/93] chore(mysql-cluster): optimisation metrics (#84) --- addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml index f0eb3a45..34a406b2 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml @@ -1112,9 +1112,7 @@ metrics: - --collect.perf_schema.replication_group_members - --collect.perf_schema.replication_group_member_stats - --collect.perf_schema.replication_applier_status_by_worker - - --collect.auto_increment.columns - - --collect.binlog_size - - --collect.engine_innodb_status + secondary: [] ## Mysqld Prometheus exporter resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ From 71d6128924744946733c8d8e5af7304aa2b8ddce Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 22 Oct 2024 10:27:51 +0800 Subject: [PATCH 13/93] chore(prometheus): drop mysql metrics (#85) --- .../prometheus/templates/_scrape_config.tpl | 74 ------------------- .../prometheus/2/chart/prometheus/values.yaml | 2 - 2 files changed, 76 deletions(-) diff --git a/addons/prometheus/2/chart/prometheus/templates/_scrape_config.tpl b/addons/prometheus/2/chart/prometheus/templates/_scrape_config.tpl index 7bfe50c1..3dc59c11 100644 --- a/addons/prometheus/2/chart/prometheus/templates/_scrape_config.tpl +++ b/addons/prometheus/2/chart/prometheus/templates/_scrape_config.tpl @@ -132,10 +132,6 @@ relabel_configs: - source_labels: [__meta_kubernetes_pod_node_name] action: replace target_label: 
node - - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] - separator: ; - regex: mysql - action: drop {{- end -}} {{- define "addons.kubernetes-service-endpoints-slow" -}} @@ -179,10 +175,6 @@ relabel_configs: - source_labels: [__meta_kubernetes_pod_node_name] action: replace target_label: node - - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] - separator: ; - regex: mysql - action: drop {{- end -}} {{- define "addons.kubernetes-pods" -}} @@ -236,10 +228,6 @@ relabel_configs: - source_labels: [__meta_kubernetes_pod_node_name] action: replace target_label: node - - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] - separator: ; - regex: mysql - action: drop {{- end -}} {{- define "addons.kubernetes-pods-slow" -}} @@ -292,68 +280,6 @@ relabel_configs: - source_labels: [__meta_kubernetes_pod_node_name] action: replace target_label: node - - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] - separator: ; - regex: mysql - action: drop {{- end -}} -{{- define "addons.mysql-metrics" -}} -honor_labels: true -kubernetes_sd_configs: - - role: endpoints - namespaces: - own_namespace: true - names: - - {{ include "common.names.namespace" .context }} -params: - collect[]: - - informationSchema.processlist - - performanceSchema.replication_group_members - - performanceSchema.replication_group_member_stats - - performanceSchema.replication_applier_status_by_worker - - auto_increment.columns - - binlog_size - -relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] - action: drop - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) 
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: (.+?)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: service - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: node - - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] - separator: ; - regex: mysql - action: keep -{{- end -}} \ No newline at end of file diff --git a/addons/prometheus/2/chart/prometheus/values.yaml b/addons/prometheus/2/chart/prometheus/values.yaml index d93277a7..c18a66fa 100644 --- a/addons/prometheus/2/chart/prometheus/values.yaml +++ b/addons/prometheus/2/chart/prometheus/values.yaml @@ -649,8 +649,6 @@ server: {{- include "addons.kubernetes-pods" (dict "context" $) | nindent 4 }} - job_name: kubernetes-pods-slow {{- include "addons.kubernetes-pods-slow" (dict "context" $) | nindent 4 }} - - job_name: mysql-metrics - {{- include "addons.mysql-metrics" (dict "context" $) | nindent 4 }} {{- end }} {{ $scheme := .Values.drycc.scheme }} From e0ef22444e2f30252842b80742b927872030432d Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 22 Oct 2024 11:13:56 +0800 Subject: [PATCH 14/93] fix(prometheus): configmap-reload images address (#86) --- addons/prometheus/2/chart/prometheus/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/addons/prometheus/2/chart/prometheus/values.yaml 
b/addons/prometheus/2/chart/prometheus/values.yaml index c18a66fa..a114035b 100644 --- a/addons/prometheus/2/chart/prometheus/values.yaml +++ b/addons/prometheus/2/chart/prometheus/values.yaml @@ -1589,8 +1589,8 @@ configmapReload: ## configmap-reload container image ## image: - repository: jimmidyson/configmap-reload - tag: v0.5.0 + repository: registry.drycc.cc/drycc-addons/configmap-reload + tag: 0 pullPolicy: IfNotPresent # containerPort: 9533 From fef305cdd9d2c45d3e32a3d0dabc4d8da14014ac Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 22 Oct 2024 16:23:58 +0800 Subject: [PATCH 15/93] chore(rabbitmq): add mqtt config --- addons/rabbitmq/3.12/meta.yaml | 15 +++++++++++++++ .../3.12/plans/standard-16c32g3w/bind.yaml | 4 ++-- .../rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml | 4 ++-- .../rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml | 4 ++-- .../3.12/plans/standard-8c16g3w/bind.yaml | 4 ++-- 5 files changed, 23 insertions(+), 8 deletions(-) diff --git a/addons/rabbitmq/3.12/meta.yaml b/addons/rabbitmq/3.12/meta.yaml index f28c58ca..d65534de 100644 --- a/addons/rabbitmq/3.12/meta.yaml +++ b/addons/rabbitmq/3.12/meta.yaml @@ -15,12 +15,27 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "initScripts" + required: false + description: "initScripts config for values.yaml" +- name: "extraPlugins" + required: false + description: "extraPlugins config for values.yaml" +- name: "extraContainerPorts" + required: false + description: "extraContainerPorts config for values.yaml" +- name: "extraPorts" + required: false + description: "extraPorts config for values.yaml" - name: "networkPolicy.allowNamespaces" required: false description: "networkPolicy allowNamespaces config for values.yaml" - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.extraPorts" + required: false + description: "service.extraPorts config for values.yaml" - name: "metrics.enabled" 
required: false description: "metrics enabled or not config for values.yaml" diff --git a/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml index 4750fb59..a17a74d4 100644 --- a/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml @@ -21,7 +21,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' {{- end }} {{- if .Values.auth.tls.enabled }} @@ -29,7 +29,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp-ssl")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp-ssl")].port }' {{- end }} {{- if .Values.auth.username }} diff --git a/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml index 4750fb59..a17a74d4 100644 --- a/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml @@ -21,7 +21,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' {{- end }} {{- if .Values.auth.tls.enabled }} @@ -29,7 +29,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . 
}} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp-ssl")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp-ssl")].port }' {{- end }} {{- if .Values.auth.username }} diff --git a/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml index 4750fb59..a17a74d4 100644 --- a/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml @@ -21,7 +21,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' {{- end }} {{- if .Values.auth.tls.enabled }} @@ -29,7 +29,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp-ssl")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp-ssl")].port }' {{- end }} {{- if .Values.auth.username }} diff --git a/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml index 4750fb59..a17a74d4 100644 --- a/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml @@ -21,7 +21,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' {{- end }} {{- if .Values.auth.tls.enabled }} @@ -29,7 +29,7 @@ credential: valueFrom: serviceRef: name: {{ include "common.names.fullname" . 
}} - jsonpath: '{ .spec.ports[?(@.targetPort=="amqp-ssl")].port }' + jsonpath: '{ .spec.ports[?(@.name=="amqp-ssl")].port }' {{- end }} {{- if .Values.auth.username }} From 2e6c77553c494e7eea98bac5a922d82b3fd1acfe Mon Sep 17 00:00:00 2001 From: Eamon Date: Wed, 23 Oct 2024 11:06:28 +0800 Subject: [PATCH 16/93] chore(fluentbit): timezone (#87) --- addons/fluentbit/2/chart/fluentbit/templates/daemonset.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/addons/fluentbit/2/chart/fluentbit/templates/daemonset.yaml b/addons/fluentbit/2/chart/fluentbit/templates/daemonset.yaml index 215c7ed8..601a15c2 100644 --- a/addons/fluentbit/2/chart/fluentbit/templates/daemonset.yaml +++ b/addons/fluentbit/2/chart/fluentbit/templates/daemonset.yaml @@ -81,7 +81,13 @@ spec: mountPath: /var/log - name: {{ include "fluentbit.configMap" . }} mountPath: /opt/drycc/fluent-bit/etc/fluent-bit + - name: timezone + mountPath: /etc/localtime + volumes: + - name: timezone + hostPath: + path: /usr/share/zoneinfo/Asia/Shanghai - name: data emptyDir: {} - name: varlog From 663272aaece1e24af7541cb34c2fe49032931214 Mon Sep 17 00:00:00 2001 From: Eamon Date: Fri, 25 Oct 2024 16:41:36 +0800 Subject: [PATCH 17/93] chore(mysql-cluster): bind add domain --- addons/mysql-cluster/8.0/plans/standard-16c64g400/bind.yaml | 2 ++ addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml | 2 ++ addons/mysql-cluster/8.0/plans/standard-2c4g20/bind.yaml | 2 ++ addons/mysql-cluster/8.0/plans/standard-2c8g50/bind.yaml | 2 ++ addons/mysql-cluster/8.0/plans/standard-32c128g800/bind.yaml | 2 ++ addons/mysql-cluster/8.0/plans/standard-4c16g100/bind.yaml | 2 ++ addons/mysql-cluster/8.0/plans/standard-8c32g200/bind.yaml | 2 ++ 7 files changed, 14 insertions(+) diff --git a/addons/mysql-cluster/8.0/plans/standard-16c64g400/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-16c64g400/bind.yaml index 6a7147c9..a0668383 100644 --- a/addons/mysql-cluster/8.0/plans/standard-16c64g400/bind.yaml +++ 
b/addons/mysql-cluster/8.0/plans/standard-16c64g400/bind.yaml @@ -6,6 +6,8 @@ credential: name: {{ include "common.names.fullname" . }}-router jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} + - name: DOMAIN + value: {{ template "common.names.fullname" . }}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: HOST valueFrom: serviceRef: diff --git a/addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml index 6a7147c9..a0668383 100644 --- a/addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml @@ -6,6 +6,8 @@ credential: name: {{ include "common.names.fullname" . }}-router jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} + - name: DOMAIN + value: {{ template "common.names.fullname" . }}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: HOST valueFrom: serviceRef: diff --git a/addons/mysql-cluster/8.0/plans/standard-2c4g20/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-2c4g20/bind.yaml index 6a7147c9..a0668383 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c4g20/bind.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c4g20/bind.yaml @@ -6,6 +6,8 @@ credential: name: {{ include "common.names.fullname" . }}-router jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} + - name: DOMAIN + value: {{ template "common.names.fullname" . }}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: HOST valueFrom: serviceRef: diff --git a/addons/mysql-cluster/8.0/plans/standard-2c8g50/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-2c8g50/bind.yaml index 6a7147c9..a0668383 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c8g50/bind.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c8g50/bind.yaml @@ -6,6 +6,8 @@ credential: name: {{ include "common.names.fullname" . 
}}-router jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} + - name: DOMAIN + value: {{ template "common.names.fullname" . }}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: HOST valueFrom: serviceRef: diff --git a/addons/mysql-cluster/8.0/plans/standard-32c128g800/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-32c128g800/bind.yaml index 6a7147c9..a0668383 100644 --- a/addons/mysql-cluster/8.0/plans/standard-32c128g800/bind.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-32c128g800/bind.yaml @@ -6,6 +6,8 @@ credential: name: {{ include "common.names.fullname" . }}-router jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} + - name: DOMAIN + value: {{ template "common.names.fullname" . }}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: HOST valueFrom: serviceRef: diff --git a/addons/mysql-cluster/8.0/plans/standard-4c16g100/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-4c16g100/bind.yaml index 6a7147c9..a0668383 100644 --- a/addons/mysql-cluster/8.0/plans/standard-4c16g100/bind.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-4c16g100/bind.yaml @@ -6,6 +6,8 @@ credential: name: {{ include "common.names.fullname" . }}-router jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} + - name: DOMAIN + value: {{ template "common.names.fullname" . }}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: HOST valueFrom: serviceRef: diff --git a/addons/mysql-cluster/8.0/plans/standard-8c32g200/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-8c32g200/bind.yaml index 6a7147c9..a0668383 100644 --- a/addons/mysql-cluster/8.0/plans/standard-8c32g200/bind.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-8c32g200/bind.yaml @@ -6,6 +6,8 @@ credential: name: {{ include "common.names.fullname" . }}-router jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} + - name: DOMAIN + value: {{ template "common.names.fullname" . 
}}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: HOST valueFrom: serviceRef: From ac39df86fd18de1651fe05b57c2ffb09a11dc9a0 Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 28 Oct 2024 09:37:01 +0800 Subject: [PATCH 18/93] chore(postgresql-cluster): bind add domain --- .../15/chart/postgresql-cluster/values.yaml | 3 ++- .../15/plans/standard-16c64g400/bind.yaml | 6 +++++- .../postgresql-cluster/15/plans/standard-1c2g10/bind.yaml | 6 +++++- .../postgresql-cluster/15/plans/standard-2c4g20/bind.yaml | 6 +++++- .../postgresql-cluster/15/plans/standard-2c8g50/bind.yaml | 6 +++++- .../15/plans/standard-32c128g800/bind.yaml | 6 +++++- .../15/plans/standard-32c64g4000/bind.yaml | 6 +++++- .../postgresql-cluster/15/plans/standard-4c16g100/bind.yaml | 6 +++++- .../postgresql-cluster/15/plans/standard-8c32g200/bind.yaml | 6 +++++- 9 files changed, 42 insertions(+), 9 deletions(-) diff --git a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml index 1bed7475..efa6be7a 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml @@ -432,4 +432,5 @@ networkPolicy: ## (with the correct destination port). ## allowCurrentNamespace: true - allowNamespaces: \ No newline at end of file + allowNamespaces: +clusterDomain: cluster.local \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml b/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml index 0283dff3..5fc257f9 100644 --- a/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . 
}}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: diff --git a/addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml b/addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml index e41b7b31..43111931 100644 --- a/addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: diff --git a/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml b/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml index 0283dff3..5fc257f9 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . 
}}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: diff --git a/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml b/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml index 0283dff3..5fc257f9 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: diff --git a/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml b/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml index 0283dff3..5fc257f9 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . 
}}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: diff --git a/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml b/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml index 0283dff3..5fc257f9 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: diff --git a/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml b/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml index 0283dff3..5fc257f9 100644 --- a/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . 
}}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: diff --git a/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml b/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml index 0283dff3..5fc257f9 100644 --- a/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml @@ -10,7 +10,11 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }}-repl jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - name: MASTER_HOST valueFrom: serviceRef: From a8c3f018832fa4cb68dcf673a985f849524e5f5d Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 4 Nov 2024 11:23:10 +0800 Subject: [PATCH 19/93] chore(addons): add ydb --- addons/index.yaml | 5 +- .../2024/chart/yugabytedb/Chart.yaml | 17 + .../2024/chart/yugabytedb/README.md | 1 + .../2024/chart/yugabytedb/app-readme.md | 1 + .../chart/yugabytedb/expose-all-shared.yaml | 21 + .../2024/chart/yugabytedb/expose-all.yaml | 31 + .../chart/yugabytedb/generate_kubeconfig.py | 220 +++++ .../chart/yugabytedb/openshift.values.yaml | 4 + .../2024/chart/yugabytedb/questions.yaml | 174 ++++ .../2024/chart/yugabytedb/templates/NOTES.txt | 29 + .../chart/yugabytedb/templates/_helpers.tpl | 548 +++++++++++ .../2024/chart/yugabytedb/templates/bind.yaml | 33 + .../yugabytedb/templates/certificates.yaml | 150 +++ .../templates/debug_config_map.yaml | 23 + .../hooks/setup-credentials-job.yaml | 80 ++ .../templates/master-servicemonitor.yaml | 51 + .../multicluster/common-tserver-service.yaml | 23 + .../multicluster/mcs-service-export.yaml | 21 + .../multicluster/service-per-pod.yaml | 34 + 
.../chart/yugabytedb/templates/service.yaml | 898 ++++++++++++++++++ .../setup-credentials-configmap.yaml | 262 +++++ .../templates/tserver-servicemonitor.yaml | 115 +++ .../2024/chart/yugabytedb/tests/README.md | 19 + .../tests/test_affinity_merges.yaml | 191 ++++ .../tests/values_affinity_merge.yaml | 66 ++ .../2024/chart/yugabytedb/values.yaml | 691 ++++++++++++++ .../2024/chart/yugabytedb/yugabyte-rbac.yaml | 19 + addons/yugabytedb/2024/meta.yaml | 24 + .../2024/plans/standard-1c2g3w10/bind.yaml | 33 + .../create-instance-schema.json | 12 + .../2024/plans/standard-1c2g3w10/meta.yaml | 6 + .../2024/plans/standard-1c2g3w10/values.yaml | 31 + .../2024/plans/standard-2c4g3w20/bind.yaml | 37 + .../create-instance-schema.json | 12 + .../2024/plans/standard-2c4g3w20/meta.yaml | 6 + .../2024/plans/standard-2c4g3w20/values.yaml | 31 + 36 files changed, 3918 insertions(+), 1 deletion(-) create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/Chart.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/README.md create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/app-readme.md create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/expose-all-shared.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/expose-all.yaml create mode 100755 addons/yugabytedb/2024/chart/yugabytedb/generate_kubeconfig.py create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/openshift.values.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/questions.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/NOTES.txt create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/_helpers.tpl create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/bind.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/certificates.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/debug_config_map.yaml create mode 100644 
addons/yugabytedb/2024/chart/yugabytedb/templates/hooks/setup-credentials-job.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/master-servicemonitor.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/common-tserver-service.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/mcs-service-export.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/service-per-pod.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/service.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/setup-credentials-configmap.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/templates/tserver-servicemonitor.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/tests/README.md create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/tests/test_affinity_merges.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/tests/values_affinity_merge.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/values.yaml create mode 100644 addons/yugabytedb/2024/chart/yugabytedb/yugabyte-rbac.yaml create mode 100644 addons/yugabytedb/2024/meta.yaml create mode 100644 addons/yugabytedb/2024/plans/standard-1c2g3w10/bind.yaml create mode 100644 addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json create mode 100644 addons/yugabytedb/2024/plans/standard-1c2g3w10/meta.yaml create mode 100644 addons/yugabytedb/2024/plans/standard-1c2g3w10/values.yaml create mode 100644 addons/yugabytedb/2024/plans/standard-2c4g3w20/bind.yaml create mode 100644 addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json create mode 100644 addons/yugabytedb/2024/plans/standard-2c4g3w20/meta.yaml create mode 100644 addons/yugabytedb/2024/plans/standard-2c4g3w20/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index a3a86454..a3cf0a27 100644 --- a/addons/index.yaml +++ 
b/addons/index.yaml @@ -62,4 +62,7 @@ entries: description: "ClickHouse is the fastest and most resource efficient open-source database for real-time apps and analytics." kvrocks: - version: 2.8 - description: "Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol." \ No newline at end of file + description: "Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol." + yugabytedb: + - version: 2024 + description: "YugabyteDB is a high-performance distributed SQL database for powering global, internet-scale applications. " \ No newline at end of file diff --git a/addons/yugabytedb/2024/chart/yugabytedb/Chart.yaml b/addons/yugabytedb/2024/chart/yugabytedb/Chart.yaml new file mode 100644 index 00000000..127d00b5 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v2 +name: yugabyte +version: 2024.1.3 +appVersion: 2024.1.3.0-b105 +kubeVersion: ">=1.17.0-0" +home: https://www.yugabyte.com +description: YugabyteDB is the high-performance distributed SQL database for building global, internet-scale apps. +icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4 +sources: +- https://github.com/yugabyte/yugabyte-db +maintainers: +- name: Sanketh Indarapu + email: sanketh@yugabyte.com +- name: Govardhan Reddy Jalla + email: gjalla@yugabyte.com +annotations: + charts.openshift.io/name: yugabyte \ No newline at end of file diff --git a/addons/yugabytedb/2024/chart/yugabytedb/README.md b/addons/yugabytedb/2024/chart/yugabytedb/README.md new file mode 100644 index 00000000..9528cff1 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/README.md @@ -0,0 +1 @@ +YugabyteDB can be deployed in various Kubernetes configurations (including single zone, multi-zone and multi-cluster) using this Helm Chart. 
Detailed documentation is available in [YugabyteDB Docs for Kubernetes Deployments](https://docs.yugabyte.com/latest/deploy/kubernetes/). diff --git a/addons/yugabytedb/2024/chart/yugabytedb/app-readme.md b/addons/yugabytedb/2024/chart/yugabytedb/app-readme.md new file mode 100644 index 00000000..27ccaf2b --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/app-readme.md @@ -0,0 +1 @@ +This chart bootstraps an RF3 YugabyteDB version 2024.1.3.0-b105 cluster using the Helm Package Manager. diff --git a/addons/yugabytedb/2024/chart/yugabytedb/expose-all-shared.yaml b/addons/yugabytedb/2024/chart/yugabytedb/expose-all-shared.yaml new file mode 100644 index 00000000..fae02347 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/expose-all-shared.yaml @@ -0,0 +1,21 @@ +serviceEndpoints: + - name: "yb-master-service" + type: LoadBalancer + app: "yb-master" + ports: + ui: "7000" + + - name: "yb-tserver-service" + type: LoadBalancer + app: "yb-tserver" + ports: + yql-port: "9042" + yedis-port: "6379" + ysql-port: "5433" + + - name: "yugabyted-ui-service" + type: LoadBalancer + app: "yb-master" + sessionAffinity: ClientIP + ports: + yugabyted-ui: "15433" diff --git a/addons/yugabytedb/2024/chart/yugabytedb/expose-all.yaml b/addons/yugabytedb/2024/chart/yugabytedb/expose-all.yaml new file mode 100644 index 00000000..ac3edf56 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/expose-all.yaml @@ -0,0 +1,31 @@ +serviceEndpoints: + - name: "yb-master-ui" + type: LoadBalancer + app: "yb-master" + ports: + ui: "7000" + + - name: "yql-service" + type: LoadBalancer + app: "yb-tserver" + ports: + yql-port: "9042" + + - name: "yedis-service" + type: LoadBalancer + app: "yb-tserver" + ports: + yedis-port: "6379" + + - name: "ysql-service" + type: LoadBalancer + app: "yb-tserver" + ports: + ysql-port: "5433" + + - name: "yugabyted-ui-service" + type: LoadBalancer + app: "yb-master" + sessionAffinity: ClientIP + ports: + yugabyted-ui: "15433" diff --git 
a/addons/yugabytedb/2024/chart/yugabytedb/generate_kubeconfig.py b/addons/yugabytedb/2024/chart/yugabytedb/generate_kubeconfig.py new file mode 100755 index 00000000..0d91739f --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/generate_kubeconfig.py @@ -0,0 +1,220 @@ +#!/usr/bin/python +# Copyright (c) YugaByte, Inc. + +# This script would generate a kubeconfig for the given servie account +# by fetching the cluster information and also add the service account +# token for the authentication purpose. + +import argparse +from subprocess import check_output +from sys import exit +import json +import base64 +import tempfile +import time +import os.path + + +def run_command(command_args, namespace=None, as_json=True, log_command=True): + command = ["kubectl"] + if namespace: + command.extend(["--namespace", namespace]) + command.extend(command_args) + if as_json: + command.extend(["-o", "json"]) + if log_command: + print("Running command: {}".format(" ".join(command))) + output = check_output(command) + if as_json: + return json.loads(output) + else: + return output.decode("utf8") + + +def create_sa_token_secret(directory, sa_name, namespace): + """Creates a service account token secret for sa_name in + namespace. Returns the name of the secret created. 
+ + Ref: + https://k8s.io/docs/concepts/configuration/secret/#service-account-token-secrets + + """ + token_secret = { + "apiVersion": "v1", + "data": { + "do-not-delete-used-for-yugabyte-anywhere": "MQ==", + }, + "kind": "Secret", + "metadata": { + "annotations": { + "kubernetes.io/service-account.name": sa_name, + }, + "name": sa_name, + }, + "type": "kubernetes.io/service-account-token", + } + token_secret_file_name = os.path.join(directory, "token_secret.yaml") + with open(token_secret_file_name, "w") as token_secret_file: + json.dump(token_secret, token_secret_file) + run_command(["apply", "-f", token_secret_file_name], namespace) + return sa_name + + +def get_secret_data(secret, namespace): + """Returns the secret in JSON format if it has ca.crt and token in + it, else returns None. It retries 3 times with 1 second timeout + for the secret to be populated with this data. + + """ + secret_data = None + num_retries = 5 + timeout = 2 + while True: + secret_json = run_command(["get", "secret", secret], namespace) + if "ca.crt" in secret_json["data"] and "token" in secret_json["data"]: + secret_data = secret_json + break + + num_retries -= 1 + if num_retries == 0: + break + print( + "Secret '{}' is not populated. Sleep {}s, ({} retries left)".format( + secret, timeout, num_retries + ) + ) + time.sleep(timeout) + return secret_data + + +def get_secrets_for_sa(sa_name, namespace): + """Returns a list of all service account token secrets associated + with the given sa_name in the namespace. 
+ + """ + secrets = run_command( + [ + "get", + "secret", + "--field-selector", + "type=kubernetes.io/service-account-token", + "-o", + 'jsonpath="{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name == "' + + sa_name + + '")].metadata.name}"', + ], + namespace, + as_json=False, + ) + return secrets.strip('"').split() + + +parser = argparse.ArgumentParser(description="Generate KubeConfig with Token") +parser.add_argument("-s", "--service_account", help="Service Account name", required=True) +parser.add_argument("-n", "--namespace", help="Kubernetes namespace", default="kube-system") +parser.add_argument("-c", "--context", help="kubectl context") +parser.add_argument("-o", "--output_file", help="output file path") +args = vars(parser.parse_args()) + +# if the context is not provided we use the current-context +context = args["context"] +if context is None: + context = run_command(["config", "current-context"], args["namespace"], as_json=False) + +cluster_attrs = run_command( + ["config", "get-contexts", context.strip(), "--no-headers"], args["namespace"], as_json=False +) + +cluster_name = cluster_attrs.strip().split()[2] +endpoint = run_command( + [ + "config", + "view", + "-o", + 'jsonpath="{.clusters[?(@.name =="' + cluster_name + '")].cluster.server}"', + ], + args["namespace"], + as_json=False, +) +service_account_info = run_command(["get", "sa", args["service_account"]], args["namespace"]) + +tmpdir = tempfile.TemporaryDirectory() + +# Get the token and ca.crt from service account secret. +sa_secrets = list() + +# Get secrets specified in the service account, there can be multiple +# of them, and not all are service account token secrets. 
+if "secrets" in service_account_info: + sa_secrets = [secret["name"] for secret in service_account_info["secrets"]] + +# Find the existing additional service account token secrets +sa_secrets.extend(get_secrets_for_sa(args["service_account"], args["namespace"])) + +secret_data = None +for secret in sa_secrets: + secret_data = get_secret_data(secret, args["namespace"]) + if secret_data is not None: + break + +# Kubernetes 1.22+ doesn't create the service account token secret by +# default, we have to create one. +if secret_data is None: + print("No usable secret found for '{}', creating one.".format(args["service_account"])) + token_secret = create_sa_token_secret(tmpdir.name, args["service_account"], args["namespace"]) + secret_data = get_secret_data(token_secret, args["namespace"]) + if secret_data is None: + exit( + "Failed to generate kubeconfig: No usable credentials found for '{}'.".format( + args["service_account"] + ) + ) + + +context_name = "{}-{}".format(args["service_account"], cluster_name) +kube_config = args["output_file"] +if not kube_config: + kube_config = "/tmp/{}.conf".format(args["service_account"]) + + +ca_crt_file_name = os.path.join(tmpdir.name, "ca.crt") +ca_crt_file = open(ca_crt_file_name, "wb") +ca_crt_file.write(base64.b64decode(secret_data["data"]["ca.crt"])) +ca_crt_file.close() + +# create kubeconfig entry +set_cluster_cmd = [ + "config", + "set-cluster", + cluster_name, + "--kubeconfig={}".format(kube_config), + "--server={}".format(endpoint.strip('"')), + "--embed-certs=true", + "--certificate-authority={}".format(ca_crt_file_name), +] +run_command(set_cluster_cmd, as_json=False) + +user_token = base64.b64decode(secret_data["data"]["token"]).decode("utf-8") +set_credentials_cmd = [ + "config", + "set-credentials", + context_name, + "--token={}".format(user_token), + "--kubeconfig={}".format(kube_config), +] +run_command(set_credentials_cmd, as_json=False, log_command=False) + +set_context_cmd = [ + "config", + "set-context", + 
context_name, + "--cluster={}".format(cluster_name), + "--user={}".format(context_name), + "--kubeconfig={}".format(kube_config), +] +run_command(set_context_cmd, as_json=False) + +use_context_cmd = ["config", "use-context", context_name, "--kubeconfig={}".format(kube_config)] +run_command(use_context_cmd, as_json=False) + +print("Generated the kubeconfig file: {}".format(kube_config)) diff --git a/addons/yugabytedb/2024/chart/yugabytedb/openshift.values.yaml b/addons/yugabytedb/2024/chart/yugabytedb/openshift.values.yaml new file mode 100644 index 00000000..d2784b23 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/openshift.values.yaml @@ -0,0 +1,4 @@ +# OCP compatible values for yugabyte + +Image: + repository: "quay.io/yugabyte/yugabyte-ubi" diff --git a/addons/yugabytedb/2024/chart/yugabytedb/questions.yaml b/addons/yugabytedb/2024/chart/yugabytedb/questions.yaml new file mode 100644 index 00000000..6befa49e --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/questions.yaml @@ -0,0 +1,174 @@ +--- +questions: + ## Default images for yugabyte + - variable: questions.defaultYugabyteDB + default: true + description: "Use default settings for YugabyteDB" + label: Use default + type: boolean + show_subquestion_if: false + group: "YugabyteDB" + subquestions: + - variable: Image.repository + default: "yugabytedb/yugabyte" + required: true + type: string + label: YugabyteDB image repository + description: "YugabyteDB image repository" + - variable: Image.tag + default: "2.5.1.0-b153" + required: true + type: string + label: YugabyteDB image tag + description: "YugabyteDB image tag" + - variable: Image.pullPolicy + default: "IfNotPresent" + required: false + type: enum + options: + - "Always" + - "IfNotPresent" + label: YugabyteDB image pull policy + description: "YugabyteDB image pull policy" + - variable: storage.ephemeral + default: false + required: false + type: boolean + label: YugabyteDB ephemeral storage + description: "Won't allocate PVs when 
true" + - variable: replicas.master + default: 3 + description: "Number of replicas for Master" + type: int + required: true + label: Replication Factor Master + - variable: replicas.tserver + default: 3 + description: "Number of replicas for TServer" + type: int + required: true + label: Replication Factor TServer + - variable: statefulSetAnnotations + description: Annotations for the StatefulSet + type: dict + required: false + label: "Annotations for the StatefulSet" + - variable: questions.defaultMasterStorage + default: true + description: "Use default storage configurations for YugabyteDB Master" + label: Use default storage configurations + type: boolean + show_subquestion_if: false + group: "Master Storage" + subquestions: + - variable: storage.master.count + default: 2 + required: true + type: int + label: YugabyteDB master storage disk count + description: "YugabyteDB master storage disk count" + - variable: storage.master.size + default: "10Gi" + required: true + type: string + label: YugabyteDB master storage size + description: "YugabyteDB master storage size" + - variable: storage.master.storageClass + default: "" + required: false + type: storageclass + label: YugabyteDB master storage class + description: "YugabyteDB master storage class" + - variable: questions.defaultTServerStorage + default: true + description: "Use default storage configurations for YugabyteDB TServer" + label: Use default storage configuration + type: boolean + show_subquestion_if: false + group: "TServer Storage" + subquestions: + - variable: storage.tserver.count + default: 2 + required: true + type: int + label: YugabyteDB TServer storage disk count + description: "YugabyteDB TServer storage disk count" + - variable: storage.tserver.size + default: "10Gi" + required: true + type: string + label: YugabyteDB TServer storage size + description: "YugabyteDB TServer storage size" + - variable: storage.tserver.storageClass + default: "" + required: false + type: storageclass + 
label: YugabyteDB TServer storage class + description: "YugabyteDB TServer storage class" + ## Default resources + - variable: questions.defaultResources + default: true + description: "Use default resources for YugabyteDB" + label: Use default resources + type: boolean + show_subquestion_if: false + group: "Resources" + subquestions: + - variable: resource.master.requests.cpu + default: "2" + description: "Master vcpu allocation for YugabyteDB" + type: string + required: true + label: vcpu allocation for master + - variable: resource.master.requests.memory + default: "2Gi" + description: "Master RAM allocation for YugabyteDB" + type: string + required: true + label: RAM allocation for master + - variable: resource.tserver.requests.cpu + default: "2" + description: "TServer vcpu allocation for YugabyteDB" + type: string + required: true + label: vcpu allocation for tserver + - variable: resource.tserver.requests.memory + default: "4Gi" + description: "TServer RAM allocation for YugabyteDB" + type: string + required: true + label: RAM allocation for tserver + ## TLS + - variable: tls.enabled + default: false + description: "Enable TLS - TLS disabled by default" + label: Enable TLS + type: boolean + show_subquestion_if: true + group: "TLS" + subquestions: + - variable: tls.nodeToNode + default: true + description: "Node to Node" + type: boolean + required: false + label: Node to Node + - variable: tls.clientToServer + default: true + description: "Client to server" + type: boolean + required: false + label: Client to server + - variable: tls.insecure + default: false + description: "Insecure - no service will connect on unencrypted connection" + type: boolean + required: false + label: Insecure communication + - variable: tls.certManager.enabled + default: false + description: "Use cert-manager to provide cluster certificates" + type: boolean + required: false + label: Cert-Manager Support + diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/NOTES.txt 
b/addons/yugabytedb/2024/chart/yugabytedb/templates/NOTES.txt new file mode 100644 index 00000000..2f90d749 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/NOTES.txt @@ -0,0 +1,29 @@ +1. Get YugabyteDB Pods by running this command: + kubectl --namespace {{ .Release.Namespace }} get pods + +2. Get list of YugabyteDB services that are running: + kubectl --namespace {{ .Release.Namespace }} get services + +3. Get information about the load balancer services: + kubectl get svc --namespace {{ .Release.Namespace }} + +4. Connect to one of the tablet server: + kubectl exec --namespace {{ .Release.Namespace }} -it yb-tserver-0 bash + +5. Run YSQL shell from inside of a tablet server: + kubectl exec --namespace {{ .Release.Namespace }} -it yb-tserver-0 -- /home/yugabyte/bin/ysqlsh -h yb-tserver-0.yb-tservers.{{ .Release.Namespace }} + +6. Cleanup YugabyteDB Pods + For helm 2: + helm delete {{ .Release.Name }} --purge + For helm 3: + helm delete {{ .Release.Name }} -n {{ .Release.Namespace }} + NOTE: You need to manually delete the persistent volume + {{- $root := . -}} + {{- range .Values.Services }} + kubectl delete pvc --namespace {{ $root.Release.Namespace }} -l app={{.label}} + {{- end }} +{{ if $root.Values.yugabytedUi.enabled }} +NOTE: The yugabyted UI is now available and is enabled by default. It requires version 2.21.0 or greater. +If you are using a custom image of YugabyteDB that is older than 2.21.0, please disable the UI by setting yugabytedUi.enabled to false. +{{- end }} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/_helpers.tpl b/addons/yugabytedb/2024/chart/yugabytedb/templates/_helpers.tpl new file mode 100644 index 00000000..1ae79d23 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/_helpers.tpl @@ -0,0 +1,548 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +The components in this chart create additional resources that expand the longest created name strings. +The longest name that gets created of 20 characters, so truncation should be 63-20=43. +*/}} +{{- define "yugabyte.fullname" -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 43 | trimSuffix "-" -}} + {{- else -}} + {{- $name := default .Chart.Name .Values.nameOverride -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 43 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 43 | trimSuffix "-" -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate common labels. +*/}} +{{- define "yugabyte.labels" }} +heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }} +release: {{ .Release.Name | quote }} +chart: {{ .Chart.Name | quote }} +component: {{ .Values.Component | quote }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end }} + +{{/* +Generate app label. +*/}} +{{- define "yugabyte.applabel" }} +{{- if .root.Values.oldNamingStyle }} +app: "{{ .label }}" +{{- else }} +app.kubernetes.io/name: "{{ .label }}" +{{- end }} +{{- end }} + +{{/* +Generate app selector. +*/}} +{{- define "yugabyte.appselector" }} +{{- if .root.Values.oldNamingStyle }} +app: "{{ .label }}" +{{- else }} +app.kubernetes.io/name: "{{ .label }}" +release: {{ .root.Release.Name | quote }} +{{- end }} +{{- end }} + +{{/* +Create secrets in DBNamespace from other namespaces by iterating over envSecrets. 
+*/}} +{{- define "yugabyte.envsecrets" -}} +{{- range $v := .secretenv }} +{{- if $v.valueFrom.secretKeyRef.namespace }} +{{- $secretObj := (lookup +"v1" +"Secret" +$v.valueFrom.secretKeyRef.namespace +$v.valueFrom.secretKeyRef.name) +| default dict }} +{{- $secretData := (get $secretObj "data") | default dict }} +{{- $secretValue := (get $secretData $v.valueFrom.secretKeyRef.key) | default "" }} +{{- if (and (not $secretValue) (not $v.valueFrom.secretKeyRef.optional)) }} +{{- required (printf "Secret or key missing for %s/%s in namespace: %s" +$v.valueFrom.secretKeyRef.name +$v.valueFrom.secretKeyRef.key +$v.valueFrom.secretKeyRef.namespace) +nil }} +{{- end }} +{{- if $secretValue }} +apiVersion: v1 +kind: Secret +metadata: + {{- $secretfullname := printf "%s-%s-%s-%s" + $.root.Release.Name + $v.valueFrom.secretKeyRef.namespace + $v.valueFrom.secretKeyRef.name + $v.valueFrom.secretKeyRef.key + }} + name: {{ printf "%s-%s-%s-%s-%s-%s" + $.root.Release.Name + ($v.valueFrom.secretKeyRef.namespace | substr 0 5) + ($v.valueFrom.secretKeyRef.name | substr 0 5) + ( $v.valueFrom.secretKeyRef.key | substr 0 5) + (sha256sum $secretfullname | substr 0 4) + ($.suffix) + | lower | replace "." "" | replace "_" "" + }} + namespace: "{{ $.root.Release.Namespace }}" + labels: + {{- include "yugabyte.labels" $.root | indent 4 }} +type: Opaque # should it be an Opaque secret? +data: + {{ $v.valueFrom.secretKeyRef.key }}: {{ $secretValue | quote }} +{{- end }} +{{- end }} +--- +{{- end }} +{{- end }} + +{{/* +Add env secrets to DB statefulset. 
+*/}} +{{- define "yugabyte.addenvsecrets" -}} +{{- range $v := .secretenv }} +- name: {{ $v.name }} + valueFrom: + secretKeyRef: + {{- if $v.valueFrom.secretKeyRef.namespace }} + {{- $secretfullname := printf "%s-%s-%s-%s" + $.root.Release.Name + $v.valueFrom.secretKeyRef.namespace + $v.valueFrom.secretKeyRef.name + $v.valueFrom.secretKeyRef.key + }} + name: {{ printf "%s-%s-%s-%s-%s-%s" + $.root.Release.Name + ($v.valueFrom.secretKeyRef.namespace | substr 0 5) + ($v.valueFrom.secretKeyRef.name | substr 0 5) + ($v.valueFrom.secretKeyRef.key | substr 0 5) + (sha256sum $secretfullname | substr 0 4) + ($.suffix) + | lower | replace "." "" | replace "_" "" + }} + {{- else }} + name: {{ $v.valueFrom.secretKeyRef.name }} + {{- end }} + key: {{ $v.valueFrom.secretKeyRef.key }} + optional: {{ $v.valueFrom.secretKeyRef.optional | default "false" }} +{{- end }} +{{- end }} +{{/* +Create Volume name. +*/}} +{{- define "yugabyte.volume_name" -}} + {{- printf "%s-datadir" (include "yugabyte.fullname" .) -}} +{{- end -}} + +{{/* +Derive the memory hard limit in bytes for Master and Tserver components based on +a given memory size and a limit percentage. + +The function expects two parameters: +1. 'size': Specifies memory in 'G' or 'Gi' format (e.g., "2Gi"). +2. 'limitPercent': An integer representing the percentage of the memory limit (e.g., 85 for 85%). + +It uses a base multiplier of 1000 for 'G' units and 1024 for 'Gi' units. +*/}} +{{- define "yugabyte.memory_hard_limit" -}} + {{- $baseMultiplier := 1000 -}} + {{- if .size | toString | hasSuffix "Gi" -}} + {{- $baseMultiplier = 1024 -}} + {{- end -}} + {{- $limit_percent := .limitPercent -}} + {{- $multiplier := int (div (mul $limit_percent $baseMultiplier) 100) -}} + {{- printf "%d" .size | regexFind "\\d+" | mul $baseMultiplier | mul $baseMultiplier | mul $multiplier -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "yugabyte.chart" -}} + {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate a preflight check script invocation. +*/}} +{{- define "yugabyte.preflight_check" -}} +{{- if not .Values.preflight.skipAll -}} +{{- $port := .Preflight.Port -}} +{{- range $addr := split "," .Preflight.Addr -}} +if [ -f /home/yugabyte/tools/k8s_preflight.py ]; then + PYTHONUNBUFFERED="true" /home/yugabyte/tools/k8s_preflight.py \ + dnscheck \ + --addr="{{ $addr }}" \ +{{- if not $.Values.preflight.skipBind }} + --port="{{ $port }}" +{{- else }} + --skip_bind +{{- end }} +fi && \ +{{ end }} +{{- end }} +{{- end }} + +{{/* +Get YugaByte fs data directories. +*/}} +{{- define "yugabyte.fs_data_dirs" -}} + {{- range $index := until (int (.count)) -}} + {{- if ne $index 0 }},{{ end }}/mnt/disk{{ $index -}} + {{- end -}} +{{- end -}} + +{{/* +Get files from fs data directories for readiness / liveness probes. +*/}} +{{- define "yugabyte.fs_data_dirs_probe_files" -}} + {{- range $index := until (int (.count)) -}} + {{- if ne $index 0 }} {{ end }}"/mnt/disk{{ $index -}}/disk.check" + {{- end -}} +{{- end -}} + + +{{/* +Command to do a disk write and sync for liveness probes. +*/}} +{{- define "yugabyte.fs_data_dirs_probe" -}} +echo "disk check at: $(date)" \ + | tee {{ template "yugabyte.fs_data_dirs_probe_files" . }} \ + && sync {{ template "yugabyte.fs_data_dirs_probe_files" . }} +{{- end -}} + + +{{/* +Generate server FQDN. +*/}} +{{- define "yugabyte.server_fqdn" -}} + {{- if .Values.multicluster.createServicePerPod -}} + {{- printf "$(HOSTNAME).$(NAMESPACE).svc.%s" .Values.domainName -}} + {{- else if (and .Values.oldNamingStyle .Values.multicluster.createServiceExports) -}} + {{ $membershipName := required "A valid membership name is required! 
Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }} + {{- printf "$(HOSTNAME).%s.%s.$(NAMESPACE).svc.clusterset.local" $membershipName .Service.name -}} + {{- else if .Values.oldNamingStyle -}} + {{- printf "$(HOSTNAME).%s.$(NAMESPACE).svc.%s" .Service.name .Values.domainName -}} + {{- else -}} + {{- if .Values.multicluster.createServiceExports -}} + {{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }} + {{- printf "$(HOSTNAME).%s.%s-%s.$(NAMESPACE).svc.clusterset.local" $membershipName (include "yugabyte.fullname" .) .Service.name -}} + {{- else -}} + {{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate server broadcast address. +*/}} +{{- define "yugabyte.server_broadcast_address" -}} + {{- include "yugabyte.server_fqdn" . }}:{{ index .Service.ports "tcp-rpc-port" -}} +{{- end -}} + +{{/* +Generate server RPC bind address. + +In case of multi-cluster services (MCS), we set it to $(POD_IP) to +ensure YCQL uses a resolvable address. +See https://github.com/yugabyte/yugabyte-db/issues/16155 + +We use a workaround for above in case of Istio by setting it to +$(POD_IP) and localhost. Master doesn't support that combination, so +we stick to 0.0.0.0, which works for master. +*/}} +{{- define "yugabyte.rpc_bind_address" -}} + {{- $port := index .Service.ports "tcp-rpc-port" -}} + {{- if .Values.istioCompatibility.enabled -}} + {{- if (eq .Service.name "yb-masters") -}} + 0.0.0.0:{{ $port }} + {{- else -}} + $(POD_IP):{{ $port }},127.0.0.1:{{ $port }} + {{- end -}} + {{- else if (or .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod) -}} + $(POD_IP):{{ $port }} + {{- else -}} + {{- include "yugabyte.server_fqdn" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Generate server web interface. +*/}} +{{- define "yugabyte.webserver_interface" -}} + {{- eq .Values.ip_version_support "v6_only" | ternary "[::]" "0.0.0.0" -}} +{{- end -}} + +{{/* +Generate server CQL proxy bind address. +*/}} +{{- define "yugabyte.cql_proxy_bind_address" -}} + {{- if or .Values.istioCompatibility.enabled .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod -}} + 0.0.0.0:{{ index .Service.ports "tcp-yql-port" -}} + {{- else -}} + {{- include "yugabyte.server_fqdn" . -}} + {{- end -}} +{{- end -}} + +{{/* +Generate server PGSQL proxy bind address. +*/}} +{{- define "yugabyte.pgsql_proxy_bind_address" -}} + {{- eq .Values.ip_version_support "v6_only" | ternary "[::]" "0.0.0.0" -}}:{{ index .Service.ports "tcp-ysql-port" -}} +{{- end -}} + +{{/* +Get YugaByte master addresses +*/}} +{{- define "yugabyte.master_addresses" -}} + {{- $master_replicas := .Values.replicas.master | int -}} + {{- $domain_name := .Values.domainName -}} + {{- $newNamingStylePrefix := printf "%s-" (include "yugabyte.fullname" .) -}} + {{- $prefix := ternary "" $newNamingStylePrefix $.Values.oldNamingStyle -}} + {{- range .Values.Services -}} + {{- if eq .name "yb-masters" -}} + {{- range $index := until $master_replicas -}} + {{- if ne $index 0 }},{{ end -}} + {{- $prefix }}yb-master-{{ $index }}.{{ $prefix }}yb-masters.$(NAMESPACE).svc.{{ $domain_name }}:7100 + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Compute the maximum number of unavailable pods based on the number of master replicas +*/}} +{{- define "yugabyte.max_unavailable_for_quorum" -}} + {{- $master_replicas_100x := .Values.replicas.master | int | mul 100 -}} + {{- $max_unavailable_master_replicas := 100 | div (100 | sub (2 | div ($master_replicas_100x | add 100))) -}} + {{- printf "%d" $max_unavailable_master_replicas -}} +{{- end -}} + +{{/* +Set consistent issuer name. 
+*/}} +{{- define "yugabyte.tls_cm_issuer" -}} + {{- if .Values.tls.certManager.bootstrapSelfsigned -}} + {{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }} + {{- else -}} + {{ .Values.tls.certManager.useClusterIssuer | ternary .Values.tls.certManager.clusterIssuer .Values.tls.certManager.issuer}} + {{- end -}} +{{- end -}} + +{{/* + Verify the extraVolumes and extraVolumeMounts mappings. + Every extraVolumes should have extraVolumeMounts +*/}} +{{- define "yugabyte.isExtraVolumesMappingExists" -}} + {{- $lenExtraVolumes := len .extraVolumes -}} + {{- $lenExtraVolumeMounts := len .extraVolumeMounts -}} + + {{- if and (eq $lenExtraVolumeMounts 0) (gt $lenExtraVolumes 0) -}} + {{- fail "You have not provided the extraVolumeMounts for extraVolumes." -}} + {{- else if and (eq $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}} + {{- fail "You have not provided the extraVolumes for extraVolumeMounts." -}} + {{- else if and (gt $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}} + {{- $volumeMountsList := list -}} + {{- range .extraVolumeMounts -}} + {{- $volumeMountsList = append $volumeMountsList .name -}} + {{- end -}} + + {{- $volumesList := list -}} + {{- range .extraVolumes -}} + {{- $volumesList = append $volumesList .name -}} + {{- end -}} + + {{- range $volumesList -}} + {{- if not (has . $volumeMountsList) -}} + {{- fail (printf "You have not provided the extraVolumeMounts for extraVolume %s" .) -}} + {{- end -}} + {{- end -}} + + {{- range $volumeMountsList -}} + {{- if not (has . $volumesList) -}} + {{- fail (printf "You have not provided the extraVolumes for extraVolumeMounts %s" .) 
-}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* + Default nodeAffinity for multi-az deployments +*/}} +{{- define "yugabyte.multiAZNodeAffinity" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: failure-domain.beta.kubernetes.io/zone + operator: In + values: + - {{ quote .Values.AZ }} + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - {{ quote .Values.AZ }} +{{- end -}} + +{{/* + Default podAntiAffinity for master and tserver + + This requires "appLabelArgs" to be passed in - defined in service.yaml + we have a .root and a .label in appLabelArgs +*/}} +{{- define "yugabyte.podAntiAffinity" -}} +preferredDuringSchedulingIgnoredDuringExecution: +- weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + {{- if .root.Values.oldNamingStyle }} + - key: app + operator: In + values: + - "{{ .label }}" + {{- else }} + - key: app.kubernetes.io/name + operator: In + values: + - "{{ .label }}" + - key: release + operator: In + values: + - {{ .root.Release.Name | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* + YB Master ports +*/}} +{{- define "yugabyte.yb_masters.ports" -}} +{{- $masterPorts := dict -}} +{{- range .Values.Services -}} + {{- if eq .name "yb-masters" -}} + {{- range $key, $value := .ports -}} + {{- $masterPorts = set $masterPorts $key $value -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- toYaml $masterPorts -}} +{{- end -}} + +{{/* + Readiness Probe for Master +*/}} +{{- define "yugabyte.master.readinessProbe" -}} +{{- if .Values.master.customReadinessProbe -}} +{{- toYaml .Values.master.customReadinessProbe }} +{{- else if .Values.master.readinessProbe.enabled -}} +{{- toYaml (omit .Values.master.readinessProbe "enabled") }} +httpGet: + path: / + port: {{ index (include "yugabyte.yb_masters.ports" .| fromYaml) "http-ui" }} +{{- end -}} +{{- end -}} + +{{/* + YB Tservers ports +*/}} +{{- define 
"yugabyte.yb_tservers.ports" -}} +{{- $tserverPorts := dict -}} +{{- range .Values.Services }} + {{- if eq .name "yb-tservers" }} + {{- range $key, $value := .ports }} + {{- $tserverPorts = set $tserverPorts $key $value }} + {{- end }} + {{- end }} +{{- end }} +{{- toYaml $tserverPorts -}} +{{- end -}} + +{{/* + Readiness Probe for Tserver + Use ".Values.authCredentials.ysql.password" while setting ysql credentials through YB DB values.yaml + Use ".Values.gflags.tserver.ysql_enable_auth" while setting ysql credentials through YBA +*/}} +{{- define "yugabyte.tserver.readinessProbe" -}} +{{- if .Values.tserver.customReadinessProbe -}} +{{- toYaml .Values.tserver.customReadinessProbe }} +{{- else if .Values.tserver.readinessProbe.enabled -}} +{{- toYaml (omit .Values.tserver.readinessProbe "enabled") }} +exec: + command: + - bash + - -v + - -c + - | + {{- if not .Values.disableYsql }} + {{- if (or .Values.authCredentials.ysql.password (eq .Values.gflags.tserver.ysql_enable_auth "true")) }} + unix_socket=$(find /tmp -name ".yb.*"); + ysqlsh_output=$(ysqlsh -U yugabyte -h "${unix_socket}" -d system_platform -c "\\conninfo"); + exit_code="$?"; + {{- else }} + ysqlsh_output=$(ysqlsh -U yugabyte -h 127.0.0.1 -p {{ index (include "yugabyte.yb_tservers.ports" . | fromYaml) "tcp-ysql-port" }} -d system_platform -c "\\conninfo"); + exit_code="$?"; + {{- end }} + + if [[ $exit_code -ne 0 ]]; then + echo "Error while executing ysqlsh command. Exit code: ${exit_code}"; + echo "Error: ${ysqlsh_output}"; + exit "${exit_code}" + fi + {{- end }} + + {{- if not (eq .Values.gflags.tserver.start_cql_proxy "false") }} + {{- if (and .Values.tls.enabled .Values.tls.clientToServer) }} + ycqlsh_output=$(ycqlsh --debug --ssl -e "SHOW HOST" "$HOSTNAME" {{ index (include "yugabyte.yb_tservers.ports" . | fromYaml) "tcp-yql-port" }} 2>&1); + {{- else }} + ycqlsh_output=$(ycqlsh --debug -e "SHOW HOST" "$HOSTNAME" {{ index (include "yugabyte.yb_tservers.ports" . 
| fromYaml) "tcp-yql-port" }} 2>&1); + {{- end }} + exit_code="$?"; + + if [[ $exit_code -ne 0 && "${ycqlsh_output}" != *"Remote end requires authentication"* ]]; then + echo "Error while executing ycqlsh command. Exit code: ${exit_code}"; + echo "Error: ${ycqlsh_output}"; + exit "${exit_code}" + fi + {{- end }} + + exit 0 +{{- end -}} +{{- end -}} + +{{/* + Startup Probe for Master +*/}} +{{- define "yugabyte.master.startupProbe" -}} +{{- if .Values.master.customStartupProbe -}} +{{- toYaml .Values.master.customStartupProbe }} +{{- else if .Values.master.startupProbe.enabled -}} +{{- toYaml (omit .Values.master.startupProbe "enabled") }} +tcpSocket: + port: {{ index (include "yugabyte.yb_masters.ports" .| fromYaml) "tcp-rpc-port" }} +{{- end -}} +{{- end -}} + +{{/* + Startup Probe for Tserver +*/}} +{{- define "yugabyte.tserver.startupProbe" -}} +{{- if .Values.tserver.customStartupProbe -}} +{{- toYaml .Values.tserver.customStartupProbe }} +{{- else if .Values.tserver.startupProbe.enabled -}} +{{- toYaml (omit .Values.tserver.startupProbe "enabled") }} +tcpSocket: + port: {{ index (include "yugabyte.yb_tservers.ports" .| fromYaml) "tcp-rpc-port" }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/bind.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/bind.yaml new file mode 100644 index 00000000..e1ea2b53 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/bind.yaml @@ -0,0 +1,33 @@ +credential: + - name: TSERVER_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-tserver-service + jsonpath: '{ .spec.clusterIP }' + - name: TSERVER_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-tserver-service + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: UI_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) 
}}-yugabyted-ui-service + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: UI_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yugabyted-ui-service + jsonpath: '{ .spec.clusterIP }' + - name: MASTER_UI_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-master-ui + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: MASTER_UI_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-master-ui + jsonpath: '{ .spec.clusterIP }' + - name: TSERVER_PORT + value: "5433" \ No newline at end of file diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/certificates.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/certificates.yaml new file mode 100644 index 00000000..07fc2e5f --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/certificates.yaml @@ -0,0 +1,150 @@ +{{- $root := . -}} +--- +{{- if $root.Values.tls.certManager.enabled }} +{{- if $root.Values.tls.certManager.bootstrapSelfsigned }} +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ (printf "%s-bootstrap" (include "yugabyte.tls_cm_issuer" $root)) | quote }} + namespace: "{{ $root.Release.Namespace }}" +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }} + namespace: "{{ $root.Release.Namespace }}" +spec: + isCA: true + privateKey: + algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }} + encoding: PKCS8 + size: {{ $root.Values.tls.certManager.certificates.keySize }} + commonName: Yugabyte Selfsigned CA + secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }} + issuerRef: + name: {{ (printf "%s-bootstrap" (include "yugabyte.tls_cm_issuer" $root)) | quote }} + kind: Issuer +--- +apiVersion: 
cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ include "yugabyte.tls_cm_issuer" $root | quote }} + namespace: "{{ $root.Release.Namespace }}" +spec: + ca: + secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }} +--- +{{- else }} +{{/* when bootstrapSelfsigned = false, ie. when using an external CA. +Create a Secret with just the rootCA.cert value and mount into master/tserver pods. +This will be used as a fall back in case the Secret generated by cert-manager does not +have a root ca.crt. This can happen for certain certificate issuers like LetsEncrypt. +*/}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }} + namespace: "{{ $root.Release.Namespace }}" + labels: + {{- include "yugabyte.labels" $root | indent 4 }} +type: Opaque +data: + ca.crt: {{ $root.Values.tls.rootCA.cert }} +--- +{{- end }} + + +{{/* +The below Certificate resource will trigger cert-manager to issue crt/key into Secrets. +These secrets are mounted into master/tserver pods. +*/}} +{{- range .Values.Services }} +{{- $service := . 
-}} +{{- $appLabelArgs := dict "label" .label "root" $root -}} +{{- $serviceValues := (dict "Service" $service "Values" $root.Values "Chart" $root.Chart "Release" $root.Release) -}} +{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}} + +{{- if (gt (int $replicas) 0) }} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }} + namespace: "{{ $root.Release.Namespace }}" +spec: + secretTemplate: + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 6 }} + {{- include "yugabyte.labels" $root | indent 6 }} + issuerRef: + name: {{ include "yugabyte.tls_cm_issuer" $root | quote }} + {{- if $root.Values.tls.certManager.useClusterIssuer }} + kind: ClusterIssuer + {{- else }} + kind: Issuer + {{- end }} + secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }} + duration: {{ $root.Values.tls.certManager.certificates.duration | quote }} + renewBefore: {{ $root.Values.tls.certManager.certificates.renewBefore | quote }} + isCA: false + privateKey: + algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }} + encoding: PKCS8 + size: {{ $root.Values.tls.certManager.certificates.keySize }} + rotationPolicy: Always + usages: + - server auth + - client auth + # At least one of a DNS Name, URI, or IP address is required. 
+ dnsNames: + {{- range $index := until ( int ( $replicas ) ) }} + {{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }} + {{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }} + {{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }} + - {{$node}} + {{- end }} + - {{ printf "%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }} + uris: [] + ipAddresses: [] +--- +{{- end }} +{{- end }} + +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} + namespace: "{{ $root.Release.Namespace }}" +spec: + secretTemplate: + labels: + {{- include "yugabyte.labels" $root | indent 6 }} + issuerRef: + name: {{ include "yugabyte.tls_cm_issuer" $root | quote }} + {{- if $root.Values.tls.certManager.useClusterIssuer }} + kind: ClusterIssuer + {{- else }} + kind: Issuer + {{- end }} + secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} + duration: {{ $root.Values.tls.certManager.certificates.duration | quote }} + renewBefore: {{ $root.Values.tls.certManager.certificates.renewBefore | quote }} + commonName: yugabyte + isCA: false + privateKey: + algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }} + encoding: PKCS8 + size: {{ $root.Values.tls.certManager.certificates.keySize }} + rotationPolicy: Always + usages: + - client auth + dnsNames: [] + uris: [] + ipAddresses: [] +--- +{{- end }} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/debug_config_map.yaml 
b/addons/yugabytedb/2024/chart/yugabytedb/templates/debug_config_map.yaml new file mode 100644 index 00000000..a15c4fc9 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/debug_config_map.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "yugabyte.fullname" . }}-master-hooks + namespace: "{{ .Release.Namespace }}" +data: +{{- range $index := until ( int ( .Values.replicas.master ) ) }} + yb-master-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' " + yb-master-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' " +{{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "yugabyte.fullname" . }}-tserver-hooks + namespace: "{{ .Release.Namespace }}" +data: +{{- range $index := until ( int ( .Values.replicas.tserver) ) }} + yb-tserver-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' " + yb-tserver-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' " +{{- end }} +--- diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/hooks/setup-credentials-job.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/hooks/setup-credentials-job.yaml new file mode 100644 index 00000000..1bde38cb --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/hooks/setup-credentials-job.yaml @@ -0,0 +1,80 @@ +{{- if or .Values.authCredentials.ycql.user .Values.authCredentials.ycql.password .Values.authCredentials.ycql.keyspace .Values.authCredentials.ysql.password .Values.authCredentials.ysql.user .Values.authCredentials.ysql.database }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "yugabyte.fullname" . 
}}-setup-credentials + namespace: "{{ .Release.Namespace }}" + labels: + app: "setup-credentials" + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + component: "{{ .Values.Component }}" + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + backoffLimit: 2 + template: + metadata: + name: "setup-credentials" + labels: + app: "setup-credentials" + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + component: "{{ .Values.Component }}" + spec: + restartPolicy: Never + containers: + - name: setup-credentials + image: "{{ .Values.Image.repository }}:{{ .Values.Image.tag }}" + env: + {{- if .Values.authCredentials.ysql.user }} + - name: YSQL_USER + value: "{{ .Values.authCredentials.ysql.user }}" + {{- end }} + {{- if .Values.authCredentials.ysql.password }} + - name: YSQL_PASSWORD + value: "{{ .Values.authCredentials.ysql.password }}" + {{- end }} + {{- if .Values.authCredentials.ysql.database }} + - name: YSQL_DB + value: "{{ .Values.authCredentials.ysql.database }}" + {{- end }} + {{- if .Values.authCredentials.ycql.user }} + - name: YCQL_USER + value: "{{ .Values.authCredentials.ycql.user }}" + {{- end }} + {{- if .Values.authCredentials.ycql.password }} + - name: YCQL_PASSWORD + value: "{{ .Values.authCredentials.ycql.password }}" + {{- end }} + {{- if .Values.authCredentials.ycql.keyspace }} + - name: YCQL_KEYSPACE + value: "{{ .Values.authCredentials.ycql.keyspace }}" + {{- end }} + {{- if .Values.tls.enabled }} + - name: SSL_CERTFILE + value: "/root/.yugabytedb/root.crt" + {{- end }} + command: + - 'bash' + - '/home/yugabyte/bin/setup-credentials/setup-credentials.sh' + volumeMounts: + - name: setup-credentials-script + mountPath: "/home/yugabyte/bin/setup-credentials" + {{- if .Values.tls.enabled }} + - name: yugabyte-tls-client-cert + mountPath: "/root/.yugabytedb/" + {{- end }} + volumes: + - name: setup-credentials-script + configMap: + name: {{ 
include "yugabyte.fullname" . }}-setup-credentials-script + {{- if .Values.tls.enabled }} + - name: yugabyte-tls-client-cert + secret: + secretName: {{ .Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" . )) }} + defaultMode: 256 + {{- end }} +{{- end }} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/master-servicemonitor.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/master-servicemonitor.yaml new file mode 100644 index 00000000..0e3c4314 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/master-servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if and .Values.serviceMonitor.enabled .Values.serviceMonitor.master.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "yugabyte.fullname" . }}-yb-master + labels: + {{- if .Values.oldNamingStyle }} + app: "yb-master" + {{- else }} + app.kubernetes.io/name: "yb-master" + {{- end }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + component: "{{ .Values.Component }}" + {{- with .Values.serviceMonitor.extraLabels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + jobLabel: "release" + selector: + matchLabels: + {{- if .Values.oldNamingStyle }} + app: "yb-master" + {{- else }} + app.kubernetes.io/name: "yb-master" + {{- end }} + release: {{ .Release.Name | quote }} + service-type: "headless" + endpoints: + + {{- with .Values.serviceMonitor.master }} + {{- if .enabled }} + - port: {{ .port }} + path: {{ .path }} + {{- if .interval }} + interval: {{ .interval }} + {{- else }} + interval: {{ $.Values.serviceMonitor.interval }} + {{- end }} + relabelings: + - targetLabel: "group" + replacement: "yb-master" + - targetLabel: "export_type" + replacement: "master_export" + - targetLabel: "node_prefix" + replacement: {{ $.Release.Name | quote }} + metricRelabelings: + {{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/common-tserver-service.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/common-tserver-service.yaml new file mode 100644 index 00000000..68f250d2 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/common-tserver-service.yaml @@ -0,0 +1,23 @@ +{{- if (and .Values.multicluster.createCommonTserverService (not .Values.oldNamingStyle)) }} +{{- range $service := .Values.serviceEndpoints }} +{{- if eq $service.name "yb-tserver-service" }} +{{- $appLabelArgs := dict "label" $service.app "root" $ -}} +apiVersion: v1 +kind: Service +metadata: + name: "yb-tserver-common" + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} + {{- include "yugabyte.labels" $ | indent 4 }} +spec: + ports: + {{- range $label, $port := $service.ports }} + - name: {{ $label | quote }} + port: {{ $port }} + {{- end }} + selector: + {{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git 
a/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/mcs-service-export.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/mcs-service-export.yaml new file mode 100644 index 00000000..eeafcb1b --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/mcs-service-export.yaml @@ -0,0 +1,21 @@ +{{- /* + Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export + https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api#exporting-services +*/}} +{{- if .Values.multicluster.createServiceExports }} +apiVersion: {{ .Values.multicluster.mcsApiVersion }} +kind: ServiceExport +metadata: + name: {{ .Values.oldNamingStyle | ternary "yb-masters" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-masters") | quote }} + namespace: "{{ .Release.Namespace }}" + labels: + {{- include "yugabyte.labels" . | indent 4 }} +--- +apiVersion: {{ .Values.multicluster.mcsApiVersion }} +kind: ServiceExport +metadata: + name: {{ .Values.oldNamingStyle | ternary "yb-tservers" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-tservers") | quote }} + namespace: "{{ .Release.Namespace }}" + labels: + {{- include "yugabyte.labels" . 
| indent 4 }} +{{ end -}} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/service-per-pod.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/service-per-pod.yaml new file mode 100644 index 00000000..15e09dce --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/multicluster/service-per-pod.yaml @@ -0,0 +1,34 @@ +{{- if .Values.multicluster.createServicePerPod }} +{{- range $server := .Values.Services }} +{{- range $replicaNum := until (int (ternary $.Values.replicas.master $.Values.replicas.tserver (eq $server.name "yb-masters"))) }} +{{- $appLabelArgs := dict "label" $server.label "root" $ -}} +{{- $podName := $.Values.oldNamingStyle | ternary $server.label (printf "%s-%s" (include "yugabyte.fullname" $) $server.label) -}} +{{- $podName := printf "%s-%d" $podName $replicaNum -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ $podName | quote }} + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} + {{- include "yugabyte.labels" $ | indent 4 }} + service-type: "non-endpoint" +spec: + ports: + {{- range $label, $port := $server.ports }} + {{- if (eq $label "grpc-ybc-port") }} + {{- if $.Values.ybc.enabled }} + - name: {{ $label | quote }} + port: {{ $port }} + {{- end }} + {{- else }} + - name: {{ $label | quote }} + port: {{ $port }} + {{- end }} + {{- end}} + selector: + statefulset.kubernetes.io/pod-name: {{ $podName | quote }} + {{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/service.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/service.yaml new file mode 100644 index 00000000..00289783 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/service.yaml @@ -0,0 +1,898 @@ +{{- $root := . 
-}} +{{- if and (eq $root.Values.tls.enabled true) (eq $root.Values.tls.certManager.enabled false) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} + namespace: "{{ $root.Release.Namespace }}" + labels: + {{- include "yugabyte.labels" $root | indent 4 }} +type: Opaque +data: +{{- if $root.Values.tls.rootCA.key }} +{{- $rootCAClient := buildCustomCert $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key -}} +{{- $client := genSignedCert "yugabyte" ( default nil ) ( default nil ) 3650 $rootCAClient }} + root.crt: {{ $rootCAClient.Cert | b64enc }} + yugabytedb.crt: {{ $client.Cert | b64enc }} + yugabytedb.key: {{ $client.Key | b64enc }} +{{- else }} + root.crt: {{ $root.Values.tls.rootCA.cert }} + yugabytedb.crt: {{ $root.Values.tls.clientCert.cert }} + yugabytedb.key: {{ $root.Values.tls.clientCert.key }} +{{- end }} +--- +{{- end }} +--- +{{- range .Values.Services }} +{{- $service := . 
-}} +{{- $appLabelArgs := dict "label" .label "root" $root -}} +{{- $serviceValues := (dict "Service" $service "Values" $root.Values "Chart" $root.Chart "Release" $root.Release) -}} + +{{- if and (eq $root.Values.tls.enabled true) (eq $root.Values.tls.certManager.enabled false) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }} + namespace: "{{ $root.Release.Namespace }}" + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} + {{- include "yugabyte.labels" $root | indent 4 }} +type: Opaque +data: +{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}} +{{- range $index := until ( int ( $replicas ) ) }} +{{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }} +{{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }} + +{{- if $root.Values.multicluster.createServiceExports -}} + {{- $nodeOldStyle = printf "%s-%d.%s.%s.%s.svc.clusterset.local" $service.label $index $root.Values.multicluster.kubernetesClusterId $service.name $root.Release.Namespace }} + {{- $nodeNewStyle = printf "%s-%s-%d.%s.%s-%s.%s.svc.clusterset.local" (include "yugabyte.fullname" $root) $service.label $index $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }} +{{- end -}} + +{{- if $root.Values.multicluster.createServicePerPod -}} + {{- $nodeOldStyle = printf "%s-%d.%s.svc.%s" $service.label $index $root.Release.Namespace $root.Values.domainName }} + {{- $nodeNewStyle = printf "%s-%s-%d.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label 
$index $root.Release.Namespace $root.Values.domainName }} +{{- end -}} + +{{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }} +{{- if $root.Values.tls.rootCA.key }} +{{- $dns1 := printf "*.%s-%s.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }} +{{- $dns2 := printf "%s.svc.%s" $dns1 $root.Values.domainName }} +{{- if $root.Values.multicluster.createServiceExports -}} + {{- $dns1 = printf "*.%s.%s-%s.%s.svc.clusterset.local" $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }} +{{- end -}} +{{- if $root.Values.multicluster.createServicePerPod -}} + {{- $dns1 = printf "*.%s.svc.%s" $root.Release.Namespace $root.Values.domainName }} +{{- end -}} +{{- $rootCA := buildCustomCert $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key -}} +{{- $server := genSignedCert $node ( default nil ) (list $node $dns1 $dns2 ) 3650 $rootCA }} + node.{{$node}}.crt: {{ $server.Cert | b64enc }} + node.{{$node}}.key: {{ $server.Key | b64enc }} +{{- else }} + node.{{$node}}.crt: {{ $root.Values.tls.nodeCert.cert }} + node.{{$node}}.key: {{ $root.Values.tls.nodeCert.key }} +{{- end }} +{{- end }} + ca.crt: {{ $root.Values.tls.rootCA.cert }} +{{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $root.Values.oldNamingStyle | ternary .name (printf "%s-%s" (include "yugabyte.fullname" $root) .name) | quote }} + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} + {{- include "yugabyte.labels" $root | indent 4 }} + service-type: "headless" +spec: + clusterIP: None + {{- if (or (include "yugabyte.master.readinessProbe" $root) (include "yugabyte.tserver.readinessProbe" $root)) }} + publishNotReadyAddresses: true + {{- end }} + ports: + {{- range $label, $port := .ports }} + {{- if (eq $label "grpc-ybc-port") }} + {{- if $root.Values.ybc.enabled }} + - name: {{ $label | quote }} + port: {{ $port }} + {{- end }} + {{- else if (eq 
$label "yugabyted-ui") }} + {{- if $root.Values.yugabytedUi.enabled }} + - name: {{ $label | quote }} + port: {{ $port }} + {{- end }} + {{- else }} + - name: {{ $label | quote }} + port: {{ $port }} + {{- end }} + {{- end}} + selector: + {{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }} + +{{- if $root.Values.enableLoadBalancer }} +{{- range $endpoint := $root.Values.serviceEndpoints }} +{{- if eq $service.label $endpoint.app }} +{{- if (or (ne $endpoint.name "yugabyted-ui-service") $root.Values.yugabytedUi.enabled) }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $root.Values.oldNamingStyle | ternary $endpoint.name (printf "%s-%s" (include "yugabyte.fullname" $root) $endpoint.name) | quote }} + annotations: +{{ toYaml $endpoint.annotations | indent 4 }} + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} + {{- include "yugabyte.labels" $root | indent 4 }} + service-type: "endpoint" +spec: + {{- if eq $root.Release.Service "Tiller" }} + clusterIP: + {{- else }} + {{- if $endpoint.clusterIP }} + clusterIP: {{ $endpoint.clusterIP }} + {{- end }} + {{- end }} + ports: + {{- range $label, $port := $endpoint.ports }} + - name: {{ $label | quote }} + port: {{ $port }} + {{- end}} + selector: + {{- if eq $endpoint.name "yugabyted-ui-service"}} + yugabytedUi: "true" + {{- else }} + {{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }} + {{- end }} + type: {{ $root.Values.service.type }} + externalTrafficPolicy: {{ $endpoint.externalTrafficPolicy | default "Cluster" }} + {{- if $endpoint.loadBalancerIP }} + loadBalancerIP: {{ $endpoint.loadBalancerIP }} + {{- end }} + {{- if eq $endpoint.name "yugabyted-ui-service"}} + sessionAffinity: {{ $endpoint.sessionAffinity }} + {{- end }} +{{- end}} +{{- end}} +{{- end}} +{{- end}} + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ $root.Values.oldNamingStyle | ternary .label (printf "%s-%s" (include "yugabyte.fullname" $root) .label) | quote }} + 
namespace: "{{ $root.Release.Namespace }}" + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} + {{- include "yugabyte.labels" $root | indent 4 }} + {{- if $root.Values.statefulSetAnnotations }} + annotations: +{{ toYaml $root.Values.statefulSetAnnotations | indent 4 }} + {{- end }} +spec: + serviceName: {{ $root.Values.oldNamingStyle | ternary .name (printf "%s-%s" (include "yugabyte.fullname" $root) .name) | quote }} + podManagementPolicy: {{ $root.Values.PodManagementPolicy }} + {{ if eq .name "yb-masters" }} + replicas: {{ $root.Values.replicas.master }} + {{ else }} + replicas: {{ $root.Values.replicas.tserver }} + {{ end }} + {{- $storageInfo := (eq .name "yb-masters") | ternary $root.Values.storage.master $root.Values.storage.tserver -}} + {{ if not $root.Values.storage.ephemeral }} + {{- if $root.Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ $root.Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ $root.Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + {{- range $index := until (int ($storageInfo.count )) }} + - metadata: + name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }} + {{- if $root.Values.legacyVolumeClaimAnnotations }} + annotations: + volume.beta.kubernetes.io/storage-class: {{ $storageInfo.storageClass | quote }} + {{- end }} + labels: + {{- include "yugabyte.labels" $root | indent 10 }} + spec: + accessModes: + - "ReadWriteOnce" + {{- if $storageInfo.storageClass }} + storageClassName: {{ $storageInfo.storageClass }} + {{- end }} + resources: + requests: + storage: {{ $storageInfo.size }} + {{- end }} + {{- end }} + updateStrategy: + {{- if eq $root.Values.updateStrategy.type "RollingUpdate" }} + type: RollingUpdate + rollingUpdate: + {{- if eq .name "yb-masters" }} + partition: {{ 
$root.Values.partition.master }} + {{- else }} + partition: {{ $root.Values.partition.tserver }} + {{- end }} + {{- else }} + type: {{ $root.Values.updateStrategy.type }} + {{- end }} + selector: + matchLabels: + {{- include "yugabyte.appselector" ($appLabelArgs) | indent 6 }} + template: + metadata: + {{- if eq .name "yb-masters" }} + {{- if (or $root.Values.networkAnnotation $root.Values.master.podAnnotations $root.Values.tls.enabled) }} + annotations: + {{- with $root.Values.networkAnnotation }}{{ toYaml . | nindent 8 }}{{ end }} + {{- with $root.Values.master.podAnnotations }}{{ toYaml . | nindent 8 }}{{ end }} + {{- if $root.Values.tls.enabled }} + checksum/rootCA: {{ cat $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key | sha256sum }} + {{- end }} + {{- end }} + {{- else }} + {{- if (or $root.Values.networkAnnotation $root.Values.tserver.podAnnotations $root.Values.tls.enabled) }} + annotations: + {{- with $root.Values.networkAnnotation }}{{ toYaml . | nindent 8 }}{{ end }} + {{- with $root.Values.tserver.podAnnotations }}{{ toYaml . | nindent 8 }}{{ end }} + {{- if $root.Values.tls.enabled }} + checksum/rootCA: {{ cat $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key | sha256sum }} + {{- end }} + {{- end }} + {{- end }} + labels: + {{- include "yugabyte.applabel" ($appLabelArgs) | indent 8 }} + {{- include "yugabyte.labels" $root | indent 8 }} + {{- if $root.Values.istioCompatibility.enabled }} + sidecar.istio.io/inject: "true" + {{- end }} + {{- if eq .name "yb-masters" }} + {{- with $root.Values.master.podLabels }}{{ toYaml . | nindent 8 }}{{ end }} + {{- else }} + {{- with $root.Values.tserver.podLabels }}{{ toYaml . 
| nindent 8 }}{{ end }} + {{- end }} + {{- if $root.Values.yugabytedUi.enabled }} + yugabytedUi: "true" + {{- end }} + spec: + {{- if $root.Values.Image.pullSecretName }} + imagePullSecrets: + - name: {{ $root.Values.Image.pullSecretName }} + {{ end }} + {{- if $root.Values.podSecurityContext.enabled }} + securityContext: {{- omit $root.Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if $root.Values.nodeSelector }} + nodeSelector: + {{ toYaml $root.Values.nodeSelector | indent 8 }} + {{- end }} + {{- if eq .name "yb-masters" }} # yb-masters + {{- with $root.Values.master.serviceAccount }} + serviceAccountName: {{ . }} + {{- end }} + {{- if $root.Values.master.tolerations }} + tolerations: + {{- with $root.Values.master.tolerations }}{{ toYaml . | nindent 8 }}{{ end }} + {{- end }} + {{- else }} # yb-tservers + {{- with $root.Values.tserver.serviceAccount }} + serviceAccountName: {{ . }} + {{- end }} + {{- if $root.Values.tserver.tolerations }} + tolerations: + {{- with $root.Values.tserver.tolerations }}{{ toYaml . | nindent 8 }}{{ end }} + {{- end }} + {{- end }} + terminationGracePeriodSeconds: 300 + affinity: + # Set the anti-affinity selector scope to YB masters and tservers. + {{- $nodeAffinityData := dict}} + {{- if eq .name "yb-masters" -}} + {{- $nodeAffinityData = get $root.Values.master.affinity "nodeAffinity" | default (dict) -}} + {{- else -}} + {{- $nodeAffinityData = get $root.Values.tserver.affinity "nodeAffinity" | default (dict) -}} + {{- end -}} + {{ if $root.Values.AZ }} + {{- $userSelectorTerms := dig "requiredDuringSchedulingIgnoredDuringExecution" "nodeSelectorTerms" "" $nodeAffinityData | default (list) -}} + {{- $baseAffinity := include "yugabyte.multiAZNodeAffinity" $root | fromYaml -}} + {{- $requiredSchedule := (list) -}} + {{- if $userSelectorTerms -}} + {{- range $userSelectorTerms -}} + {{- $userTerm := . 
-}} + {{- range $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}} + {{- $matchExpr := concat .matchExpressions $userTerm.matchExpressions | dict "matchExpressions" -}} + {{- $requiredSchedule = mustMerge $matchExpr $userTerm | append $requiredSchedule -}} + {{- end -}} + {{- end -}} + {{- else -}} + {{- $requiredSchedule = $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}} + {{- end -}} + + {{- with $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution -}} + {{- $_ := set . "nodeSelectorTerms" $requiredSchedule -}} + {{- end -}} + {{- $nodeAffinityData = mustMerge $baseAffinity $nodeAffinityData -}} + {{- end -}} + + {{- $podAntiAffinityData := dict -}} + {{- $basePodAntiAffinity := include "yugabyte.podAntiAffinity" ($appLabelArgs) | fromYaml -}} + {{- if eq .name "yb-masters" -}} + {{- with $root.Values.master.affinity -}} + {{- $userPodAntiAffinity := get . "podAntiAffinity" | default (dict) -}} + {{- if $userPodAntiAffinity -}} + {{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}} + {{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}} + {{- end -}} + {{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}} + {{- end -}} + {{- else -}} + {{- with $root.Values.tserver.affinity -}} + {{- $userPodAntiAffinity := get . 
"podAntiAffinity" | default (dict) -}} + {{- if $userPodAntiAffinity -}} + {{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}} + {{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}} + {{- end -}} + {{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}} + {{- end -}} + {{- end -}} + + {{- if eq .name "yb-masters" -}} + {{- if $nodeAffinityData -}} + {{- $_ := set $root.Values.master.affinity "nodeAffinity" $nodeAffinityData -}} + {{- end -}} + {{- $_ := set $root.Values.master.affinity "podAntiAffinity" $podAntiAffinityData -}} + {{ toYaml $root.Values.master.affinity | nindent 8 }} + {{- else -}} + {{- if $nodeAffinityData -}} + {{- $_ := set $root.Values.tserver.affinity "nodeAffinity" $nodeAffinityData -}} + {{- end -}} + {{- $_ := set $root.Values.tserver.affinity "podAntiAffinity" $podAntiAffinityData -}} + {{ toYaml $root.Values.tserver.affinity | nindent 8 }} + {{ end }} + {{- with $root.Values.dnsConfig }} + dnsConfig: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with $root.Values.dnsPolicy }} + dnsPolicy: {{ . | quote }} + {{- end }} + containers: + - name: "{{ .label }}" + image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}" + imagePullPolicy: {{ $root.Values.Image.pullPolicy }} + lifecycle: + postStart: + exec: + command: + - "bash" + - "-c" + - > + mkdir -p /mnt/disk0/cores; + mkdir -p /mnt/disk0/yb-data/scripts; + if [ ! 
-f /mnt/disk0/yb-data/scripts/log_cleanup.sh ]; then + if [ -f /home/yugabyte/bin/log_cleanup.sh ]; then + cp /home/yugabyte/bin/log_cleanup.sh /mnt/disk0/yb-data/scripts; + fi; + fi + {{- if (and (not $root.Values.storage.ephemeral) (not $service.skipHealthChecks)) }} + livenessProbe: + exec: + command: + - bash + - -v + - -c + - | + {{- include "yugabyte.fs_data_dirs_probe" $storageInfo | nindent 14 }}; + exit_code="$?"; + echo "disk check exited with: ${exit_code}"; + exit "${exit_code}" + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + {{- if (and (eq .name "yb-masters") (include "yugabyte.master.startupProbe" $root)) }} + startupProbe: {{- include "yugabyte.master.startupProbe" $root | nindent 10 }} + {{- else if (and (eq .name "yb-tservers") (include "yugabyte.tserver.startupProbe" $root)) }} + startupProbe: {{- include "yugabyte.tserver.startupProbe" $root | nindent 10 }} + {{- end }} + {{- end }} + {{- if (and (eq .name "yb-masters") (include "yugabyte.master.readinessProbe" $root)) }} + readinessProbe: {{- include "yugabyte.master.readinessProbe" $root | nindent 10 }} + {{- else if (and (eq .name "yb-tservers") (include "yugabyte.tserver.readinessProbe" $root)) }} + readinessProbe: {{- include "yugabyte.tserver.readinessProbe" $root | nindent 10 }} + {{- end }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: YBDEVOPS_CORECOPY_DIR + value: "/mnt/disk0/cores" + {{- if eq .name "yb-masters" }} + {{- with $root.Values.master.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }} + {{- $data := dict "secretenv" $root.Values.master.secretEnv "root" $root "suffix" "master"}} + {{- include "yugabyte.addenvsecrets" $data | nindent 8 }} + {{- else }} + {{- with $root.Values.tserver.extraEnv }}{{ toYaml . 
| nindent 8 }}{{- end }} + {{- $data := dict "secretenv" $root.Values.tserver.secretEnv "root" $root "suffix" "tserver" }} + {{- include "yugabyte.addenvsecrets" $data | nindent 8 }} + {{- end }} + {{- if and $root.Values.tls.enabled $root.Values.tls.clientToServer (ne .name "yb-masters") }} + - name: SSL_CERTFILE + value: /root/.yugabytedb/root.crt + {{- end }} + resources: + {{- if eq .name "yb-masters" }} +{{ toYaml $root.Values.resource.master | indent 10 }} + {{ else }} +{{ toYaml $root.Values.resource.tserver | indent 10 }} + {{ end }} + # core dumps are collected to workingDir if + # kernel.core_pattern is set to a relative path like + # core.%e.%p.%t ref: + # https://github.com/yugabyte/charts/issues/11 + workingDir: "/mnt/disk0/cores" + command: + - "/sbin/tini" + - "--" + args: + - "/bin/bash" + - "-c" + - | + {{- if and (not $root.Values.preflight.skipUlimit) (not $root.Values.preflight.skipAll) }} + if [ -f /home/yugabyte/tools/k8s_preflight.py ]; then + /home/yugabyte/tools/k8s_preflight.py all + fi && \ + {{- end }} + {{- if (and (not $root.Values.storage.ephemeral) (not $root.Values.preflight.skipAll)) }} + {{- include "yugabyte.fs_data_dirs_probe" $storageInfo | nindent 12 }} && \ + {{- end }} + {{- $rpcAddr := include "yugabyte.rpc_bind_address" $serviceValues -}} + {{- $rpcPort := index $service.ports "tcp-rpc-port" -}} + {{- $rpcDict := dict "Addr" $rpcAddr "Port" $rpcPort -}} + {{- $rpcPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $rpcDict) -}} + {{- if $rpcPreflight -}}{{ $rpcPreflight | nindent 12 }}{{ end -}} + {{- $broadcastAddr := include "yugabyte.server_broadcast_address" $serviceValues -}} + {{/* skip bind check for servicePerPod multi-cluster, we cannot/don't bind to service IP */}} + {{- if not $root.Values.multicluster.createServicePerPod }} + {{- $broadcastPort := index $service.ports "tcp-rpc-port" -}} + {{- $broadcastDict := dict "Addr" $broadcastAddr "Port" $broadcastPort -}} + {{- 
$broadcastPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $broadcastDict) -}} + {{- if $broadcastPreflight -}}{{ $broadcastPreflight | nindent 12 }}{{ end -}} + {{- end }} + {{- $webserverAddr := include "yugabyte.webserver_interface" $serviceValues -}} + {{- $webserverPort := index $service.ports "http-ui" -}} + {{- $webserverDict := dict "Addr" $webserverAddr "Port" $webserverPort -}} + {{- $webserverPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $webserverDict) -}} + {{- if $webserverPreflight -}}{{ $webserverPreflight | nindent 12 }}{{ end }} + if [[ -f /home/yugabyte/tools/k8s_parent.py ]]; then + k8s_parent="/home/yugabyte/tools/k8s_parent.py" + else + k8s_parent="" + fi && \ + {{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }} + echo "Creating ephemeral /opt/certs/yugabyte/ as symlink to persisted /mnt/disk0/certs/" && \ + mkdir -p /mnt/disk0/certs && \ + mkdir -p /opt/certs && \ + ln -s /mnt/disk0/certs /opt/certs/yugabyte && \ + if [[ ! -f /opt/certs/yugabyte/ca.crt ]]; then + echo "Fresh install of /opt/certs/yugabyte/ca.crt" + cp /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt; + fi && \ + cmp -s /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt;sameRootCA=$? 
&& \ + if [[ $sameRootCA -eq 0 ]]; then + echo "Refreshing tls certs at /opt/certs/yugabyte/"; + cp /home/yugabyte/cert-manager/tls.crt /opt/certs/yugabyte/node.{{$rpcAddr}}.crt; + cp /home/yugabyte/cert-manager/tls.key /opt/certs/yugabyte/node.{{$rpcAddr}}.key; + chmod 600 /opt/certs/yugabyte/* + else + echo "WARNING: Not refreshing certificates as the root ca.crt has changed" + fi && \ + {{- end }} + {{- if eq .name "yb-masters" }} + exec ${k8s_parent} /home/yugabyte/bin/yb-master \ + --max_log_size="256" \ + --undefok="enable_ysql" \ + {{- if not $root.Values.storage.ephemeral }} + --fs_data_dirs={{ template "yugabyte.fs_data_dirs" $storageInfo }} \ + {{- else }} + --fs_data_dirs=/var/yugabyte \ + {{- end }} + {{- if eq $root.Values.ip_version_support "v6_only" }} + --net_address_filter=ipv6_external,ipv6_non_link_local,ipv6_all,ipv4_external,ipv4_all \ + {{- end }} + {{- if $root.Values.isMultiAz }} + --master_addresses={{ $root.Values.masterAddresses }} \ + --replication_factor={{ $root.Values.replicas.totalMasters }} \ + {{- else }} + --master_addresses={{ template "yugabyte.master_addresses" $root }} \ + --replication_factor={{ $root.Values.replicas.master }} \ + {{- end }} + {{- if not $root.Values.disableYsql }} + --enable_ysql=true \ + {{- else }} + --enable_ysql=false \ + {{- end }} + --metric_node_name=$(HOSTNAME) \ + --memory_limit_hard_bytes={{ template "yugabyte.memory_hard_limit" dict "size" $root.Values.resource.master.limits.memory "limitPercent" $root.Values.master.memoryLimitHardPercentage }} \ + --stderrthreshold=0 \ + --num_cpus={{ ceil $root.Values.resource.master.requests.cpu }} \ + --undefok=num_cpus,enable_ysql \ + {{- range $flag, $override := $root.Values.gflags.master }} + --{{ $flag }}={{ quote $override }} \ + {{- end }} + {{- if $root.Values.tls.enabled }} + --certs_dir=/opt/certs/yugabyte \ + --use_node_to_node_encryption={{ $root.Values.tls.nodeToNode }} \ + --allow_insecure_connections={{ $root.Values.tls.insecure }} \ + {{- end 
}} + --rpc_bind_addresses={{ $rpcAddr }} \ + --server_broadcast_addresses={{ $broadcastAddr }} \ + --webserver_interface={{ $webserverAddr }} \ + {{- if $root.Values.yugabytedUi.enabled }} + --master_enable_metrics_snapshotter={{ $root.Values.yugabytedUi.metricsSnapshotter.enabled }} \ + --metrics_snapshotter_tserver_metrics_whitelist={{ join "," $root.Values.yugabytedUi.metricsSnapshotter.whitelist }} + {{- end }} + {{- else }} + {{- $cqlAddr := include "yugabyte.cql_proxy_bind_address" $serviceValues -}} + {{- $cqlPort := index $service.ports "tcp-yql-port" -}} + {{- $cqlDict := dict "Addr" $cqlAddr "Port" $cqlPort -}} + {{- $cqlPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $cqlDict) -}} + {{- if $cqlPreflight -}}{{ $cqlPreflight | nindent 12 }}{{ end -}} + {{- $ysqlAddr := include "yugabyte.pgsql_proxy_bind_address" $serviceValues -}} + {{- $ysqlPort := index $service.ports "tcp-ysql-port" -}} + {{- if not $root.Values.disableYsql -}} + {{- $ysqlDict := dict "Addr" $ysqlAddr "Port" $ysqlPort -}} + {{- $ysqlPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $ysqlDict) -}} + {{- if $ysqlPreflight -}}{{ $ysqlPreflight | nindent 12 }}{{ end -}} + {{- end }} + exec ${k8s_parent} /home/yugabyte/bin/yb-tserver \ + --max_log_size="256" \ + --undefok="enable_ysql" \ + {{- if not $root.Values.storage.ephemeral }} + --fs_data_dirs={{ template "yugabyte.fs_data_dirs" $storageInfo }} \ + {{- else }} + --fs_data_dirs=/var/yugabyte \ + {{- end }} + {{- if eq $root.Values.ip_version_support "v6_only" }} + --net_address_filter=ipv6_external,ipv6_non_link_local,ipv6_all,ipv4_external,ipv4_all \ + {{- end }} + {{- if $root.Values.isMultiAz }} + --tserver_master_addrs={{ $root.Values.masterAddresses }} \ + {{- else }} + --tserver_master_addrs={{ template "yugabyte.master_addresses" $root }} \ + {{- end }} + --metric_node_name=$(HOSTNAME) \ + --memory_limit_hard_bytes={{ template "yugabyte.memory_hard_limit" dict "size" 
$root.Values.resource.tserver.limits.memory "limitPercent" $root.Values.tserver.memoryLimitHardPercentage }} \ + --stderrthreshold=0 \ + --num_cpus={{ ceil $root.Values.resource.tserver.requests.cpu }} \ + --undefok=num_cpus,enable_ysql \ + --use_node_hostname_for_local_tserver=true \ + {{- if $root.Values.authCredentials.ysql.password }} + --ysql_enable_auth=true \ + {{- if (include "yugabyte.tserver.readinessProbe" $root) }} + --ysql_hba_conf_csv="local all yugabyte trust" \ + {{- end }} + {{- end }} + {{- if or $root.Values.authCredentials.ycql.user $root.Values.authCredentials.ycql.password }} + --use_cassandra_authentication=true \ + {{- end }} + {{- range $flag, $override := $root.Values.gflags.tserver }} + --{{ $flag }}={{ quote $override }} \ + {{- end }} + {{- if $root.Values.tls.enabled }} + --certs_dir=/opt/certs/yugabyte \ + --use_node_to_node_encryption={{ $root.Values.tls.nodeToNode }} \ + --allow_insecure_connections={{ $root.Values.tls.insecure }} \ + --use_client_to_server_encryption={{ $root.Values.tls.clientToServer }} \ + --certs_for_client_dir=/opt/certs/yugabyte \ + {{- if $root.Values.tserver.serverBroadcastAddress }} + --cert_node_filename={{ include "yugabyte.server_fqdn" $serviceValues }} \ + {{- end }} + {{- end }} + --rpc_bind_addresses={{ $rpcAddr }} \ + --server_broadcast_addresses={{ $root.Values.tserver.serverBroadcastAddress | default $broadcastAddr }} \ + --webserver_interface={{ $webserverAddr }} \ + {{- if not $root.Values.disableYsql }} + --enable_ysql=true \ + --pgsql_proxy_bind_address={{ $ysqlAddr }} \ + {{- else }} + --enable_ysql=false \ + {{- end }} + --cql_proxy_bind_address={{ $cqlAddr }} \ + {{- if $root.Values.yugabytedUi.enabled }} + --tserver_enable_metrics_snapshotter={{ $root.Values.yugabytedUi.metricsSnapshotter.enabled }} \ + --metrics_snapshotter_interval_ms={{ $root.Values.yugabytedUi.metricsSnapshotter.interval }} \ + --metrics_snapshotter_tserver_metrics_whitelist={{ join "," 
$root.Values.yugabytedUi.metricsSnapshotter.whitelist }} + {{- end }} + {{- end }} + ports: + {{- range $label, $port := .ports }} + {{- if (eq $label "yugabyted-ui") }} + {{- if $root.Values.yugabytedUi.enabled }} + - containerPort: {{ $port }} + name: {{ $label | quote }} + {{- end }} + {{- else if not (eq $label "grpc-ybc-port") }} + - containerPort: {{ $port }} + name: {{ $label | quote }} + {{- end }} + {{- end}} + volumeMounts: + {{- if (eq .name "yb-tservers") }} + - name: tserver-tmp + mountPath: /tmp + {{- end }} + - name: debug-hooks-volume + mountPath: /opt/debug_hooks_config + {{ if not $root.Values.storage.ephemeral }} + {{- range $index := until (int ($storageInfo.count)) }} + - name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }} + mountPath: /mnt/disk{{ $index }} + {{- end }} + {{- end }} + {{- if $root.Values.tls.enabled }} + - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} + mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }} + readOnly: true + - name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} + mountPath: /root/.yugabytedb/ + readOnly: true + {{- end }} + {{- if and (eq .name "yb-masters") ($root.Values.master.extraVolumeMounts) -}} + {{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.master -}} + {{- $root.Values.master.extraVolumeMounts | toYaml | nindent 10 -}} + {{- else if and (eq .name "yb-tservers") ($root.Values.tserver.extraVolumeMounts) -}} + {{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}} + {{- $root.Values.tserver.extraVolumeMounts | toYaml | nindent 10 -}} + {{- end -}} + + {{ if not $root.Values.storage.ephemeral }} + - name: yb-cleanup + image: 
"{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}" + imagePullPolicy: {{ $root.Values.Image.pullPolicy }} + env: + - name: USER + value: "yugabyte" + command: + - "/sbin/tini" + - "--" + args: + - "/bin/bash" + - "-c" + - > + while true; do + sleep 3600; + /home/yugabyte/scripts/log_cleanup.sh; + done + volumeMounts: + - name: {{ $root.Values.oldNamingStyle | ternary "datadir0" (printf "%s0" (include "yugabyte.volume_name" $root)) }} + mountPath: /home/yugabyte/ + subPath: yb-data + - name: {{ $root.Values.oldNamingStyle | ternary "datadir0" (printf "%s0" (include "yugabyte.volume_name" $root)) }} + mountPath: /var/yugabyte/cores + subPath: cores + {{- if $root.Values.ybCleanup.resources }} + resources: {{ toYaml $root.Values.ybCleanup.resources | nindent 10 }} + {{- end }} + {{- end }} + + {{- if $root.Values.yugabytedUi.enabled }} + - name: yugabyted-ui + image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}" + imagePullPolicy: "IfNotPresent" + env: + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: + - "/sbin/tini" + - "--" + args: + - "/bin/bash" + - "-c" + - | + {{- if $root.Values.yugabytedUi.recoverWithoutFailure }} + while true; do + {{- end }} + /home/yugabyte/bin/yugabyted-ui \ + {{- $rpcAddr := include "yugabyte.rpc_bind_address" $serviceValues }} + -database_host={{ $rpcAddr }} \ + {{- $webserverAddr := include "yugabyte.webserver_interface" $serviceValues }} + -bind_address={{ $webserverAddr }} \ + {{- $masterPort := "7000" }} + {{- $tserverPort := "9000" }} + {{- $ysqlPort := "5433" }} + {{- $ycqlPort := "9042" }} + {{- range $root.Values.Services -}} + {{- if eq .name "yb-masters" -}} + {{- $masterPort = index .ports "http-ui" -}} + {{- else if eq .name "yb-tservers" -}} + {{- $tserverPort = index .ports "http-ui" -}} + {{- $ysqlPort = index .ports "tcp-ysql-port" -}} + {{- $ycqlPort = index .ports 
"tcp-yql-port" -}} + {{- end -}} + {{- end }} + -ysql_port={{ $ysqlPort }} \ + -ycql_port={{ $ycqlPort }} \ + -master_ui_port={{ $masterPort }} \ + -tserver_ui_port={{ $tserverPort }} \ + {{- if $root.Values.tls.enabled }} + -secure={{ $root.Values.tls.enabled }} \ + {{- end }} + {{- if $root.Values.authCredentials.ysql.user }} + -ysql_username={{ $root.Values.authCredentials.ysql.user }} \ + {{- end }} + {{- if $root.Values.authCredentials.ycql.user }} + -ycql_username={{ $root.Values.authCredentials.ycql.user }} \ + {{- end }} + {{- if $root.Values.authCredentials.ysql.password }} + -ysql_password={{ $root.Values.authCredentials.ysql.password }} \ + {{- end }} + {{- if $root.Values.authCredentials.ycql.password }} + -ycql_password={{ $root.Values.authCredentials.ycql.password }} \ + {{- end }} + || echo "ERROR: yugabyted-ui failed. This might be because your yugabyte \ + version is older than 2.21.0. If this is the case, set yugabytedUi.enabled to false \ + in helm to disable yugabyted-ui, or upgrade to a version 2.21.0 or newer."; \ + {{- if $root.Values.yugabytedUi.recoverWithoutFailure }} + echo "Attempting restart in {{ $root.Values.yugabytedUi.recoveryInterval }}." 
+ trap break TERM INT; \ + sleep {{ $root.Values.yugabytedUi.recoveryInterval }} & wait; \ + trap - TERM INT; + done \ + {{- end }} + {{- end }} + + {{- if and (eq .name "yb-tservers") ($root.Values.ybc.enabled) }} + - name: yb-controller + image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}" + imagePullPolicy: {{ $root.Values.Image.pullPolicy }} + lifecycle: + postStart: + exec: + command: + - "bash" + - "-c" + - > + mkdir -p /mnt/disk0/yw-data/controller/tmp; + mkdir -p /mnt/disk0/yw-data/controller/conf; + mkdir -p /mnt/disk0/ybc-data/controller/logs; + mkdir -p /tmp/yugabyte/controller; + ln -sf /mnt/disk0/ybc-data/controller/logs /tmp/yugabyte/controller; + ln -sf /mnt/disk0/yw-data/controller/bin /tmp/yugabyte/controller; + rm -f /tmp/yugabyte/controller/yb-controller.pid; + {{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }} + mkdir -p /opt/certs; + ln -sf /mnt/disk0/certs /opt/certs/yugabyte; + {{- end }} + command: + - "/sbin/tini" + - "--" + args: + - "/bin/bash" + - "-c" + - > + while true; do + sleep 60; + /home/yugabyte/tools/k8s_ybc_parent.py status || /home/yugabyte/tools/k8s_ybc_parent.py start; + done + {{- with index $service.ports "grpc-ybc-port" }} + ports: + - containerPort: {{ . 
}} + name: "grpc-ybc-port" + {{- end }} + volumeMounts: + - name: tserver-tmp + mountPath: /tmp + {{- if not $root.Values.storage.ephemeral }} + {{- range $index := until (int ($storageInfo.count)) }} + - name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }} + mountPath: /mnt/disk{{ $index }} + {{- end }} + {{- end }} + {{- if $root.Values.tls.enabled }} + - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} + mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }} + readOnly: true + {{- end }} + {{- if ($root.Values.tserver.extraVolumeMounts) -}} + {{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}} + {{- $root.Values.tserver.extraVolumeMounts | toYaml | nindent 10 -}} + {{- end -}} + {{- if $root.Values.ybc.resources }} + resources: {{ toYaml $root.Values.ybc.resources | nindent 10 }} + {{- end }} + {{- end}} + + volumes: + {{- if (eq .name "yb-masters") }} + - name: debug-hooks-volume + configMap: + name: {{ include "yugabyte.fullname" $root }}-master-hooks + defaultMode: 0755 + {{- else if (eq .name "yb-tservers") }} + - name: debug-hooks-volume + configMap: + name: {{ include "yugabyte.fullname" $root }}-tserver-hooks + defaultMode: 0755 + - name: tserver-tmp + emptyDir: {} + {{- end }} + {{- if $root.Values.tls.enabled }} + {{- if $root.Values.tls.certManager.enabled }} + {{- /* certManager enabled */}} + - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} + projected: + sources: + {{- if not $root.Values.tls.certManager.bootstrapSelfsigned }} + - secret: + name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }} + {{- end }} + - secret: + name: {{ 
$root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} + {{- else }} + {{/* certManager disabled */}} + - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} + secret: + secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} + defaultMode: 256 + {{- end }} + - name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} + secret: + secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} + {{- if $root.Values.tls.certManager.enabled }} + items: + - key: ca.crt + path: root.crt + - key: tls.crt + path: yugabytedb.crt + - key: tls.key + path: yugabytedb.key + {{- end }} + defaultMode: 256 + {{- end }} + {{- if and (eq .name "yb-masters") ($root.Values.master.extraVolumes) -}} + {{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.master -}} + {{- $root.Values.master.extraVolumes | toYaml | nindent 8 -}} + {{- else if and (eq .name "yb-tservers") ($root.Values.tserver.extraVolumes) -}} + {{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}} + {{- $root.Values.tserver.extraVolumes | toYaml | nindent 8 -}} + {{- end -}} +{{- if eq $root.Values.isMultiAz false }} +--- +{{/* +TODO: switch to policy/v1 completely when we stop supporting +Kubernetes versions < 1.21 +*/}} +{{- if $root.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-pdb" .label) (printf "%s-%s-pdb" (include 
"yugabyte.fullname" $root) .name) }} +spec: + maxUnavailable: {{ template "yugabyte.max_unavailable_for_quorum" $root }} + selector: + matchLabels: + {{- include "yugabyte.appselector" ($appLabelArgs) | indent 6 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/setup-credentials-configmap.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/setup-credentials-configmap.yaml new file mode 100644 index 00000000..ef3b548b --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/setup-credentials-configmap.yaml @@ -0,0 +1,262 @@ +{{- if or .Values.authCredentials.ycql.user .Values.authCredentials.ycql.password .Values.authCredentials.ycql.keyspace .Values.authCredentials.ysql.password .Values.authCredentials.ysql.user .Values.authCredentials.ysql.database }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "yugabyte.fullname" . }}-setup-credentials-script + namespace: "{{ .Release.Namespace }}" + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + component: "{{ .Values.Component }}" +data: + setup-credentials.sh: | + #!/bin/bash + + set -eo pipefail + + # Setup script to setup credentials + + # ----------------------------------------- + # Default Variables + # ----------------------------------------- + + readonly DEFAULT_YSQL_USER="yugabyte" + readonly DEFAULT_YSQL_PASSWORD="yugabyte" + readonly DEFAULT_YSQL_DB="yugabyte" + + readonly DEFAULT_YCQL_USER="cassandra" + readonly DEFAULT_YCQL_PASSWORD="cassandra" + + {{- range .Values.Services }} + {{- $service := . 
-}} + {{- if eq ($service.name) "yb-tservers" }} + readonly YSQL_PORT={{ index $service.ports "tcp-ysql-port" }} + # TODO: Update the tcp-yql-port to tcp-ycql-port in values.yaml + readonly YCQL_PORT={{ index $service.ports "tcp-yql-port" }} + {{- end }} + {{- end }} + + {{- $serviceName := .Values.oldNamingStyle | ternary "yb-tservers" (printf "%s-yb-tservers" (include "yugabyte.fullname" . )) }} + {{- $tserverFQDN := printf "%s.%s.svc.%s" $serviceName .Release.Namespace .Values.domainName }} + + prefix_ysql_cmd=( + /home/yugabyte/bin/ysqlsh -h {{ $tserverFQDN }} + -p "$YSQL_PORT" + ) + + prefix_ycql_cmd=( + /home/yugabyte/bin/ycqlsh {{ $tserverFQDN }} + "$YCQL_PORT" + ) + + {{- if .Values.tls.enabled }} + prefix_ysql_cmd+=("sslmode=require") + prefix_ycql_cmd+=(--ssl) + {{- end }} + + # ----------------------------------------- + # Variables + # ----------------------------------------- + + ysql_user= + ysql_password= + ysql_db= + ycql_user= + ycql_password= + ycql_keyspace= + + # ----------------------------------------- + # Hepler functions + # ----------------------------------------- + + cleanup() { + local exit_code=$? + echo "Exiting with code $exit_code" + exit "$exit_code" + } + + function waitUntilHealthy() { + declare -a ysql_cmd + export PGPASSWORD="$2" + ysql_cmd=( + /home/yugabyte/bin/ysqlsh -h {{ $tserverFQDN }} + -p "$3" + -U "$1" + -c "\\conninfo" + ) + + if [[ "$4" == "true" ]]; then + ysql_cmd+=("sslmode=require") + fi + + echo "${ysql_cmd[@]}" + while ! 
"${ysql_cmd[@]}"; do + sleep 5s + done + } + + export -f waitUntilHealthy + + get_ysql_credentials() { + [[ -n "$YSQL_USER" ]] && ysql_user="$YSQL_USER" || ysql_user="$DEFAULT_YSQL_USER" + + [[ -n "$YSQL_PASSWORD" ]] && ysql_password="$YSQL_PASSWORD" + + if [[ -z "$YSQL_PASSWORD" ]] && [[ "$ysql_user" != "$DEFAULT_YSQL_USER" ]]; then + ysql_password="$YSQL_USER" + fi + + [[ -n "$YSQL_DB" ]] && ysql_db="$YSQL_DB" + + [[ -z "$YSQL_DB" ]] && [[ -n "$YSQL_USER" ]] && ysql_db="$ysql_user" + + api="ysql" + } + + get_ycql_credentials() { + [[ -n "$YCQL_USER" ]] && ycql_user="$YCQL_USER" || ycql_user="$DEFAULT_YCQL_USER" + + [[ -n "$YCQL_PASSWORD" ]] && ycql_password="$YCQL_PASSWORD" + + if [[ -z "$YCQL_PASSWORD" ]] && [[ "$ycql_user" != "$DEFAULT_YCQL_USER" ]]; then + ycql_password="$YCQL_USER" + fi + + [[ -n "$YCQL_KEYSPACE" ]] && ycql_keyspace="$YCQL_KEYSPACE" + + [[ -z "$YCQL_KEYSPACE" ]] && [[ -n "$YCQL_USER" ]] && ycql_keyspace="$ycql_user" + + api="ycql" + } + + create_user() { + declare -a ysql_cmd + declare -a ycql_cmd + + case "$api" in + "ysql") + export PGPASSWORD="$DEFAULT_YSQL_PASSWORD" + read -r -a ysql_cmd <<< "${prefix_ysql_cmd[@]}" + ysql_cmd+=( + -U "$DEFAULT_YSQL_USER" + -c "CREATE ROLE ${ysql_user} with LOGIN SUPERUSER password '${ysql_password}' ;" + -c "ALTER DATABASE ${ysql_db} OWNER TO ${ysql_user} ;" + ) + "${ysql_cmd[@]}" + ;; + "ycql") + read -r -a ycql_cmd <<< "${prefix_ycql_cmd[@]}" + ycql_cmd+=( + -u "$DEFAULT_YCQL_USER" + -p "$DEFAULT_YCQL_PASSWORD" + -e "CREATE ROLE IF NOT EXISTS ${ycql_user} WITH PASSWORD = '${ycql_password}' AND LOGIN = true AND SUPERUSER = true ;" + ) + "${ycql_cmd[@]}" + ;; + *) exit 1 + esac + } + + update_password() { + declare -a ysql_cmd + declare -a ycql_cmd + + case "$api" in + "ysql") + export PGPASSWORD="$DEFAULT_YSQL_PASSWORD" + read -r -a ysql_cmd <<< "${prefix_ysql_cmd[@]}" + ysql_cmd+=( + -U "$DEFAULT_YSQL_USER" + -c "ALTER ROLE ${ysql_user} WITH PASSWORD '${ysql_password}' ;" + ) + "${ysql_cmd[@]}" + ;; + 
"ycql") + read -r -a ycql_cmd <<< "${prefix_ycql_cmd[@]}" + ycql_cmd+=( + -u "$DEFAULT_YCQL_USER" + -p "$DEFAULT_YCQL_PASSWORD" + -e "ALTER ROLE ${ycql_user} WITH PASSWORD = '${ycql_password}' ;" + ) + "${ycql_cmd[@]}" + ;; + *) exit 1 + esac + } + + create_container() { + declare -a ysql_cmd + declare -a ycql_cmd + + case "$api" in + "ysql") + export PGPASSWORD="$DEFAULT_YSQL_PASSWORD" + read -r -a ysql_cmd <<< "${prefix_ysql_cmd[@]}" + ysql_cmd+=( + -U "$DEFAULT_YSQL_USER" + -c "CREATE DATABASE ${ysql_db} ;" + ) + "${ysql_cmd[@]}" + ;; + "ycql") + read -r -a ycql_cmd <<< "${prefix_ycql_cmd[@]}" + ycql_cmd+=( + -u "$DEFAULT_YCQL_USER" + -p "$DEFAULT_YCQL_PASSWORD" + -e "CREATE KEYSPACE IF NOT EXISTS ${ycql_keyspace} ;" + ) + "${ycql_cmd[@]}" + ;; + *) exit 1 + esac + } + + # ----------------------------------------- + # Main + # ----------------------------------------- + + trap cleanup EXIT + + echo "Waiting for YugabyteDB to start." + if ! timeout 3m bash -c "waitUntilHealthy ${DEFAULT_YSQL_USER} ${DEFAULT_YSQL_PASSWORD} ${YSQL_PORT} {{ .Values.tls.enabled }}"; then + echo "Timeout while waiting for database" + exit 1 + fi + + # YSQL Credentials + get_ysql_credentials + + ## Create YSQL DB + if [[ -n $ysql_db ]] && [[ "$ysql_db" != "$DEFAULT_YSQL_DB" ]]; then + create_container + fi + + ## Update YSQL Password + if [[ -n $ysql_password ]] && [[ "$ysql_password" != "$DEFAULT_YSQL_PASSWORD" ]] && [[ "$ysql_user" == "$DEFAULT_YSQL_USER" ]]; then + update_password + fi + + ## Create YSQL User + if [[ -n $ysql_user ]] && [[ "$ysql_user" != "$DEFAULT_YSQL_USER" ]]; then + create_user + fi + + # YCQL Credentials + get_ycql_credentials + + ## Create YCQL Keyspace + if [[ -n $ycql_keyspace ]] && [[ -n "$ycql_keyspace" ]]; then + create_container + fi + + ## Update YCQL Password + if [[ -n $ycql_password ]] && [[ "$ycql_password" != "$DEFAULT_YCQL_PASSWORD" ]] && [[ "$ycql_user" == "$DEFAULT_YCQL_USER" ]]; then + update_password + fi + + ## Create YCQL User + if [[ -n 
$ycql_user ]] && [[ "$ycql_user" != "$DEFAULT_YCQL_USER" ]]; then + create_user + fi + +{{- end }} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/templates/tserver-servicemonitor.yaml b/addons/yugabytedb/2024/chart/yugabytedb/templates/tserver-servicemonitor.yaml new file mode 100644 index 00000000..64b82641 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/templates/tserver-servicemonitor.yaml @@ -0,0 +1,115 @@ +{{- $sm := .Values.serviceMonitor }} +{{ if and $sm.enabled (or $sm.tserver.enabled $sm.ycql.enabled $sm.ysql.enabled $sm.yedis.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "yugabyte.fullname" . }}-yb-tserver + labels: + {{- if .Values.oldNamingStyle }} + app: "yb-tserver" + {{- else }} + app.kubernetes.io/name: "yb-tserver" + {{- end }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + component: "{{ .Values.Component }}" + {{- with .Values.serviceMonitor.extraLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: "release" + selector: + matchLabels: + {{- if .Values.oldNamingStyle }} + app: "yb-tserver" + {{- else }} + app.kubernetes.io/name: "yb-tserver" + {{- end }} + release: {{ .Release.Name | quote }} + service-type: "headless" + endpoints: + + {{- with .Values.serviceMonitor.tserver }} + {{- if .enabled }} + - port: {{ .port }} + path: {{ .path }} + {{- if .interval }} + interval: {{ .interval }} + {{- else }} + interval: {{ $.Values.serviceMonitor.interval }} + {{- end }} + relabelings: + - targetLabel: "group" + replacement: "yb-tserver" + - targetLabel: "export_type" + replacement: "tserver_export" + - targetLabel: "node_prefix" + replacement: {{ $.Release.Name | quote }} + metricRelabelings: + {{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }} + {{- end }} + {{- end }} + + {{- with .Values.serviceMonitor.ycql }} + {{- if .enabled }} + - port: {{ .port }} + path: {{ .path }} + {{- if .interval }} + interval: {{ 
.interval }} + {{- else }} + interval: {{ $.Values.serviceMonitor.interval }} + {{- end }} + relabelings: + - targetLabel: "group" + replacement: "ycql" + - targetLabel: "export_type" + replacement: "cql_export" + - targetLabel: "node_prefix" + replacement: {{ $.Release.Name | quote }} + metricRelabelings: + {{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }} + {{- end }} + {{- end }} + + {{- with .Values.serviceMonitor.ysql }} + {{- if .enabled }} + - port: {{ .port }} + path: {{ .path }} + {{- if .interval }} + interval: {{ .interval }} + {{- else }} + interval: {{ $.Values.serviceMonitor.interval }} + {{- end }} + relabelings: + - targetLabel: "group" + replacement: "ysql" + - targetLabel: "export_type" + replacement: "ysql_export" + - targetLabel: "node_prefix" + replacement: {{ $.Release.Name | quote }} + metricRelabelings: + {{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }} + {{- end }} + {{- end }} + + {{- with .Values.serviceMonitor.yedis }} + {{- if .enabled }} + - port: {{ .port }} + path: {{ .path }} + {{- if .interval }} + interval: {{ .interval }} + {{- else }} + interval: {{ $.Values.serviceMonitor.interval }} + {{- end }} + relabelings: + - targetLabel: "group" + replacement: "yedis" + - targetLabel: "export_type" + replacement: "redis_export" + - targetLabel: "node_prefix" + replacement: {{ $.Release.Name | quote }} + metricRelabelings: + {{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/yugabytedb/2024/chart/yugabytedb/tests/README.md b/addons/yugabytedb/2024/chart/yugabytedb/tests/README.md new file mode 100644 index 00000000..e7360384 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/tests/README.md @@ -0,0 +1,19 @@ +# Unit Testing Helm charts +Unit tests for the yugabyte helm charts, which can be used to validate helm templates +gives us our expected results. 
+ +This is leveraging https://github.com/helm-unittest/helm-unittest + +See https://github.com/quintush/helm-unittest/blob/master/DOCUMENT.md for details on creating new +tests + +## Install +``` +$ helm plugin install https://github.com/helm-unittest/helm-unittest.git +``` + +## Run tests +``` +$ cd stable/yugabyte +$ helm unittest -f tests/test_*.yaml . +``` diff --git a/addons/yugabytedb/2024/chart/yugabytedb/tests/test_affinity_merges.yaml b/addons/yugabytedb/2024/chart/yugabytedb/tests/test_affinity_merges.yaml new file mode 100644 index 00000000..9671e842 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/tests/test_affinity_merges.yaml @@ -0,0 +1,191 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/quintush/helm-unittest/master/schema/helm-testsuite.json +suite: nodeAffinity and podAntiAffinity Merge +templates: +- service.yaml +tests: +- it: Test with AZ + values: + - ./values_affinity_merge.yaml + set: + AZ: test_az + oldNamingStyle: true + asserts: + - isSubset: + path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + content: + nodeSelectorTerms: + - matchExpressions: + - key: failure-domain.beta.kubernetes.io/zone + operator: In + values: + - test_az + - key: custom_label_1 + operator: In + values: + - custom_value_1 + matchFields: + - key: metadata.name + operator: In + values: + - name + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - test_az + - key: custom_label_1 + operator: In + values: + - custom_value_1 + matchFields: + - key: metadata.name + operator: In + values: + - name + - matchExpressions: + - key: failure-domain.beta.kubernetes.io/zone + operator: In + values: + - test_az + - key: custom_label_2 + operator: In + values: + - custom_value_2 + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - test_az + - key: custom_label_2 + operator: In + values: + - custom_value_2 + documentIndex: 2 + - isSubset: 
+ path: spec.template.spec.affinity.podAntiAffinity + content: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "yb-master" + topologyKey: kubernetes.io/hostname + - weight: 30 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: paa_key + operator: In + values: + - paa_value + documentIndex: 2 + - isSubset: + path: spec.template.spec.affinity.nodeAffinity + content: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: failure-domain.beta.kubernetes.io/zone + operator: In + values: + - test_az + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - test_az + preferredDuringSchedulingIgnoredDuringExecution: + random_key: random_value + documentIndex: 6 + - isSubset: + path: spec.template.spec.affinity.podAntiAffinity + content: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - yb-tserver + topologyKey: kubernetes.io/hostname + requiredDuringSchedulingIgnoredDuringExecution: + random_key2: random_value2 + documentIndex: 6 +- it: Test without AZ + values: + - ./values_affinity_merge.yaml + set: + AZ: false + oldNamingStyle: true + asserts: + - isSubset: + path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution + content: + nodeSelectorTerms: + - matchExpressions: + - key: custom_label_1 + operator: In + values: + - custom_value_1 + matchFields: + - key: metadata.name + operator: In + values: + - name + - matchExpressions: + - key: custom_label_2 + operator: In + values: + - custom_value_2 + documentIndex: 2 + - isSubset: + path: spec.template.spec.affinity.podAntiAffinity + content: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: 
app + operator: In + values: + - "yb-master" + topologyKey: kubernetes.io/hostname + - weight: 30 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: paa_key + operator: In + values: + - paa_value + documentIndex: 2 + - isSubset: + path: spec.template.spec.affinity.nodeAffinity + content: + preferredDuringSchedulingIgnoredDuringExecution: + random_key: random_value + documentIndex: 6 + - isSubset: + path: spec.template.spec.affinity.podAntiAffinity + content: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - yb-tserver + topologyKey: kubernetes.io/hostname + requiredDuringSchedulingIgnoredDuringExecution: + random_key2: random_value2 + documentIndex: 6 + diff --git a/addons/yugabytedb/2024/chart/yugabytedb/tests/values_affinity_merge.yaml b/addons/yugabytedb/2024/chart/yugabytedb/tests/values_affinity_merge.yaml new file mode 100644 index 00000000..b3de3398 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/tests/values_affinity_merge.yaml @@ -0,0 +1,66 @@ +master: + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core + ## This might override the default affinity from service.yaml + ## Example. 
+ # affinity: + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - "yb-master" + # topologyKey: kubernetes.io/hostname + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: custom_label_1 + operator: In + values: + - custom_value_1 + matchFields: + - key: metadata.name + operator: In + values: + - name + - matchExpressions: + - key: custom_label_2 + operator: In + values: + - custom_value_2 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 30 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: paa_key + operator: In + values: + - paa_value + +tserver: + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core + ## This might override the default affinity from service.yaml + ## Example. + # affinity: + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - "yb-tserver" + # topologyKey: kubernetes.io/hostname + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + random_key: random_value + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + random_key2: random_value2 + diff --git a/addons/yugabytedb/2024/chart/yugabytedb/values.yaml b/addons/yugabytedb/2024/chart/yugabytedb/values.yaml new file mode 100644 index 00000000..ce6f6da5 --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/values.yaml @@ -0,0 +1,691 @@ +# Default values for yugabyte. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+Component: "yugabytedb" + +fullnameOverride: "" +nameOverride: "" + +Image: + repository: "yugabytedb/yugabyte" + tag: 2024.1.3.0-b105 + pullPolicy: IfNotPresent + pullSecretName: "" + +storage: + ephemeral: false # will not allocate PVs when true + master: + count: 2 + size: 10Gi + storageClass: "" + tserver: + count: 2 + size: 10Gi + storageClass: "" + +resource: + master: + requests: + cpu: "2" + memory: 2Gi + limits: + cpu: "2" + ## Ensure the 'memory' value is strictly in 'Gi' or 'G' format. Deviating from these formats + ## may result in setting an incorrect value for the 'memory_limit_hard_bytes' flag. + ## Avoid using floating numbers for the numeric part of 'memory'. Doing so may lead to + ## the 'memory_limit_hard_bytes' being set to 0, as the function expects integer values. + memory: 2Gi + tserver: + requests: + cpu: "2" + memory: 4Gi + limits: + cpu: "2" + ## Ensure the 'memory' value is strictly in 'Gi' or 'G' format. Deviating from these formats + ## may result in setting an incorrect value for the 'memory_limit_hard_bytes' flag. + ## Avoid using floating numbers for the numeric part of 'memory'. Doing so may lead to + ## the 'memory_limit_hard_bytes' being set to 0, as the function expects integer values. + memory: 4Gi + +replicas: + master: 3 + tserver: 3 + ## Used to set replication factor when isMultiAz is set to true + totalMasters: 3 + +partition: + master: 0 + tserver: 0 + +updateStrategy: + type: RollingUpdate + +persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete +# Used in Multi-AZ setup +masterAddresses: "" + +isMultiAz: false +AZ: "" + +# Disable the YSQL +disableYsql: false + +tls: + # Set to true to enable the TLS. 
+ enabled: false + nodeToNode: true + clientToServer: true + # Set to false to disallow any service with unencrypted communication from joining this cluster + insecure: false + # Set enabled to true to use cert-manager instead of providing your own rootCA + certManager: + enabled: false + # Will create own ca certificate and issuer when set to true + bootstrapSelfsigned: true + # Use ClusterIssuer when set to true, otherwise use Issuer + useClusterIssuer: false + # Name of ClusterIssuer to use when useClusterIssuer is true + clusterIssuer: cluster-ca + # Name of Issuer to use when useClusterIssuer is false + issuer: yugabyte-ca + certificates: + # The lifetime before cert-manager will issue a new certificate. + # The re-issued certificates will not be automatically reloaded by the service. + # It is necessary to provide some external means of restarting the pods. + duration: 2160h # 90d + renewBefore: 360h # 15d + algorithm: RSA # ECDSA or RSA + # Can be 2048, 4096 or 8192 for RSA + # Or 256, 384 or 521 for ECDSA + keySize: 2048 + + ## When certManager.enabled=false, rootCA.cert and rootCA.key are used to generate TLS certs. + ## When certManager.enabled=true and bootstrapSelfsigned=true, rootCA is ignored. + ## When certManager.enabled=true and bootstrapSelfsigned=false, only rootCA.cert is used + ## to verify TLS certs generated and signed by the external provider. 
+ rootCA: + cert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFXTVJRd0VnWURWUVFERXd0WmRXZGgKWW5sMFpTQkVRakFlRncweE9UQXlNRGd3TURRd01qSmFGdzB5T1RBeU1EVXdNRFF3TWpKYU1CWXhGREFTQmdOVgpCQU1UQzFsMVoyRmllWFJsSUVSQ01JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCnVOMWF1aWc4b2pVMHM0OXF3QXhrT2FCaHkwcTlyaVg2akVyZWJyTHJOWDJOeHdWQmNVcWJkUlhVc3VZNS96RUQKUC9CZTNkcTFuMm9EQ2ZGVEwweGkyNFdNZExRcnJBMndCdzFtNHM1WmQzcEJ1U04yWHJkVVhkeUx6dUxlczJNbgovckJxcWRscXp6LzAyTk9TOE9SVFZCUVRTQTBSOFNMQ1RjSGxMQmRkMmdxZ1ZmemVXRlVObXhWQ2EwcHA5UENuCmpUamJJRzhJWkh5dnBkTyt3aURQM1Y1a1ZEaTkvbEtUaGUzcTFOeDg5VUNFcnRJa1pjSkYvWEs3aE90MU1sOXMKWDYzb2lVMTE1Q2svbGFGRjR6dWgrZk9VenpOVXRXeTc2RE92cm5pVGlaU0tQZDBBODNNa2l2N2VHaDVkV3owWgpsKzJ2a3dkZHJaRzVlaHhvbGhGS3pRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQXFRd0hRWURWUjBsCkJCWXdGQVlJS3dZQkJRVUhBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFEQjVRbmlYd1ptdk52eG5VbS9sTTVFbms3VmhTUzRUZldIMHY4Q0srZWZMSVBTbwpVTkdLNXU5UzNEUWlvaU9SN1Vmc2YrRnk1QXljMmNUY1M2UXBxTCt0V1QrU1VITXNJNk9oQ05pQ1gvQjNKWERPCkd2R0RIQzBVOHo3aWJTcW5zQ2Rid05kajAyM0lwMHVqNE9DVHJ3azZjd0RBeXlwVWkwN2tkd28xYWJIWExqTnAKamVQMkwrY0hkc2dKM1N4WWpkK1kvei9IdmFrZG1RZDJTL1l2V0R3aU1SRDkrYmZXWkJVRHo3Y0QyQkxEVmU0aAp1bkFaK3NyelR2Sjd5dkVodzlHSDFyajd4Qm9VNjB5SUUrYSszK2xWSEs4WnBSV0NXMnh2eWNrYXJSKytPS2NKClFsL04wWExqNWJRUDVoUzdhOTdhQktTamNqY3E5VzNGcnhJa2tKST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + key: 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdU4xYXVpZzhvalUwczQ5cXdBeGtPYUJoeTBxOXJpWDZqRXJlYnJMck5YMk54d1ZCCmNVcWJkUlhVc3VZNS96RURQL0JlM2RxMW4yb0RDZkZUTDB4aTI0V01kTFFyckEyd0J3MW00czVaZDNwQnVTTjIKWHJkVVhkeUx6dUxlczJNbi9yQnFxZGxxenovMDJOT1M4T1JUVkJRVFNBMFI4U0xDVGNIbExCZGQyZ3FnVmZ6ZQpXRlVObXhWQ2EwcHA5UENualRqYklHOElaSHl2cGRPK3dpRFAzVjVrVkRpOS9sS1RoZTNxMU54ODlVQ0VydElrClpjSkYvWEs3aE90MU1sOXNYNjNvaVUxMTVDay9sYUZGNHp1aCtmT1V6ek5VdFd5NzZET3ZybmlUaVpTS1BkMEEKODNNa2l2N2VHaDVkV3owWmwrMnZrd2RkclpHNWVoeG9saEZLelFJREFRQUJBb0lCQUJsdW1tU3gxR1djWER1Mwpwei8wZEhWWkV4c2NsU3U0SGRmZkZPcTF3cFlCUjlmeGFTZGsxQzR2YXF1UjhMaWl6WWVtVWViRGgraitkSnlSCmpwZ2JNaDV4S1BtRkw5empwU3ZUTkN4UHB3OUF5bm5sM3dyNHZhcU1CTS9aZGpuSGttRC9kQzBadEEvL0JIZ3YKNHk4d3VpWCsvUWdVaER0Z1JNcmR1ZUZ1OVlKaFo5UE9jYXkzSkkzMFhEYjdJSS9vNFNhYnhTcFI3bTg5WjY0NwpUb3hsOEhTSzl0SUQxbkl1bHVpTmx1dHI1RzdDdE93WTBSc2N5dmZ2elg4a1d2akpLZVJVbmhMSCtXVFZOaExICjdZc0tMNmlLa1NkckMzeWVPWnV4R0pEbVdrZVgxTzNPRUVGYkc4TjVEaGNqL0lXbDh1dGt3LzYwTEthNHBCS2cKTXhtNEx3RUNnWUVBNnlPRkhNY2pncHYxLzlHZC8yb3c2YmZKcTFjM1dqQkV2cnM2ZXNyMzgrU3UvdVFneXJNcAo5V01oZElpb2dYZjVlNjV5ZlIzYVBXcjJJdWMxZ0RUNlYycDZFR2h0NysyQkF1YkIzczloZisycVNRY1lkS3pmCnJOTDdKalE4ZEVGZWdYd041cHhKOTRTTVFZNEI4Qm9hOHNJWTd3TzU4dHpVMjZoclVnanFXQ1VDZ1lFQXlVUUIKNzViWlh6MGJ5cEc5NjNwYVp0bGlJY0cvUk1XMnVPOE9rVFNYSGdDSjBob25uRm5IMGZOc1pGTHdFWEtnTTRORworU3ZNbWtUekE5eVVSMHpIMFJ4UW44L1YzVWZLT2k5RktFeWx6NzNiRkV6ZW1QSEppQm12NWQ4ZTlOenZmU0E0CkdpRTYrYnFyV3VVWWRoRWlYTnY1SFNPZ3I4bUx1TzJDbGlmNTg0a0NnWUFlZzlDTmlJWmlOODAzOHNNWFYzZWIKalI5ZDNnYXY3SjJ2UnVyeTdvNDVGNDlpUXNiQ3AzZWxnY1RnczY5eWhkaFpwYXp6OGNEVndhREpyTW16cHF4cQpWY1liaFFIblppSWM5MGRubS9BaVF2eWJWNUZqNnQ5b05VVWtreGpaV1haalJXOGtZMW55QmtDUmJWVnhER0k4CjZOV0ZoeTFGaUVVVGNJcms3WVZFQlFLQmdRREpHTVIrYWRFamtlRlUwNjVadkZUYmN0VFVPY3dzb1Foalc2akkKZVMyTThxakNYeE80NnhQMnVTeFNTWFJKV3FpckQ3NDRkUVRvRjRCaEdXS21veGI3M3pqSGxWaHcwcXhDMnJ4VQorZENxODE0VXVJR3BlOTBMdWU3QTFlRU9kRHB1WVdUczVzc1FmdTE3MG5CUWQrcEhzaHNFZkhhdmJjZkhyTGpQCjQzMmhVUUtCZ1FDZ3hMZG5Pd2JMaHZLVkhhdTdPVXQxbGpUT24
0SnB5bHpnb3hFRXpzaDhDK0ZKUUQ1bkFxZXEKZUpWSkNCd2VkallBSDR6MUV3cHJjWnJIN3IyUTBqT2ZFallwU1dkZGxXaWh4OTNYODZ0aG83UzJuUlYrN1hNcQpPVW9ZcVZ1WGlGMWdMM1NGeHZqMHhxV3l0d0NPTW5DZGFCb0M0Tkw3enJtL0lZOEUwSkw2MkE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=" + ## When tls.certManager.enabled=false + ## nodeCert and clientCert will be used only when rootCA.key is empty. + ## Will be ignored and genSignedCert will be used to generate + ## node and client certs if rootCA.key is provided. + ## cert and key are base64 encoded content of certificate and key. + nodeCert: + cert: "" + key: "" + clientCert: + cert: "" + key: "" + +gflags: + master: + default_memory_limit_to_ram_ratio: 0.85 + tserver: {} +# use_cassandra_authentication: false + +yugabytedUi: + enabled: true + # If recoverWithoutFailure is true, yugabyted-ui will automatically try to restart itself + # instead of failing and causing the pod to restart + recoverWithoutFailure: true + recoveryInterval: 30s + # metricsSnapshotter must be enabled for yugabytedUi to properly display metrics + metricsSnapshotter: + enabled: true + # time between each metric snapshot in ms + interval: 11000 + whitelist: + - handler_latency_yb_tserver_TabletServerService_Read_count + - handler_latency_yb_tserver_TabletServerService_Write_count + - handler_latency_yb_tserver_TabletServerService_Read_sum + - handler_latency_yb_tserver_TabletServerService_Write_sum + - disk_usage + - cpu_usage + - node_up + +PodManagementPolicy: Parallel + +enableLoadBalancer: true + +ybc: + enabled: false + ## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container + ## Use the above link to learn more about Kubernetes resources configuration. 
+ # resources: + # requests: + # cpu: "1" + # memory: 1Gi + # limits: + # cpu: "1" + # memory: 1Gi + +ybCleanup: {} + ## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container + ## Use the above link to learn more about Kubernetes resources configuration. + # resources: + # requests: + # cpu: "1" + # memory: 1Gi + # limits: + # cpu: "1" + # memory: 1Gi + +domainName: "cluster.local" +service: + type: "LoadBalancer" + +serviceEndpoints: + - name: "yb-master-ui" + type: LoadBalancer + annotations: {} + clusterIP: "" + ## Sets the Service's externalTrafficPolicy + externalTrafficPolicy: "" + app: "yb-master" + loadBalancerIP: "" + ports: + http-ui: "7000" + + - name: "yb-tserver-service" + type: LoadBalancer + annotations: {} + clusterIP: "" + ## Sets the Service's externalTrafficPolicy + externalTrafficPolicy: "" + app: "yb-tserver" + loadBalancerIP: "" + ports: + tcp-yql-port: "9042" + tcp-yedis-port: "6379" + tcp-ysql-port: "5433" + + - name: "yugabyted-ui-service" + type: LoadBalancer + annotations: {} + clusterIP: "" + ## Sets the Service's externalTrafficPolicy + externalTrafficPolicy: "" + app: "yb-master" + loadBalancerIP: "" + sessionAffinity: ClientIP + ports: + yugabyted-ui: "15433" + +Services: + - name: "yb-masters" + label: "yb-master" + skipHealthChecks: false + memory_limit_to_ram_ratio: 0.85 + ports: + http-ui: "7000" + tcp-rpc-port: "7100" + yugabyted-ui: "15433" + + - name: "yb-tservers" + label: "yb-tserver" + skipHealthChecks: false + ports: + http-ui: "9000" + tcp-rpc-port: "9100" + tcp-yql-port: "9042" + tcp-yedis-port: "6379" + tcp-ysql-port: "5433" + http-ycql-met: "12000" + http-yedis-met: "11000" + http-ysql-met: "13000" + grpc-ybc-port: "18018" + yugabyted-ui: "15433" + + +## Should be set to true only if Istio is being used. This also adds +## the Istio sidecar injection labels to the pods. 
+## TODO: remove this once +## https://github.com/yugabyte/yugabyte-db/issues/5641 is fixed. +## +istioCompatibility: + enabled: false + +## Settings required when using multicluster environment. +multicluster: + ## Creates a ClusterIP service for each yb-master and yb-tserver + ## pod. + createServicePerPod: false + ## creates a ClusterIP service whose name does not have release name + ## in it. A common service across different clusters for automatic + ## failover. Useful when using new naming style. + createCommonTserverService: false + + ## Enable it to deploy YugabyteDB in a multi-cluster services enabled + ## Kubernetes cluster (KEP-1645). This will create ServiceExport. + ## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export + ## You can use this gist for the reference to deploy the YugabyteDB in a multi-cluster scenario. + ## Gist - https://gist.github.com/baba230896/78cc9bb6f4ba0b3d0e611cd49ed201bf + createServiceExports: false + + ## Mandatory variable when createServiceExports is set to true. + ## Use: In case of GKE, you need to pass GKE Hub Membership Name. + ## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#enabling + kubernetesClusterId: "" + + ## mcsApiVersion is used for the MCS resources created by the + ## chart. Set to net.gke.io/v1 when using GKE MCS. + mcsApiVersion: "multicluster.x-k8s.io/v1alpha1" + +serviceMonitor: + ## If true, two ServiceMonitor CRs are created. 
One for yb-master + ## and one for yb-tserver + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor + ## + enabled: false + ## interval is the default scrape_interval for all the endpoints + interval: 30s + ## extraLabels can be used to add labels to the ServiceMonitors + ## being created + extraLabels: {} + # release: prom + + ## Configurations of ServiceMonitor for yb-master + master: + enabled: true + port: "http-ui" + interval: "" + path: "/prometheus-metrics" + + ## Configurations of ServiceMonitor for yb-tserver + tserver: + enabled: true + port: "http-ui" + interval: "" + path: "/prometheus-metrics" + ycql: + enabled: true + port: "http-ycql-met" + interval: "" + path: "/prometheus-metrics" + ysql: + enabled: true + port: "http-ysql-met" + interval: "" + path: "/prometheus-metrics" + yedis: + enabled: true + port: "http-yedis-met" + interval: "" + path: "/prometheus-metrics" + + commonMetricRelabelings: + # https://git.io/JJW5p + # Save the name of the metric so we can group_by since we cannot by __name__ directly... + - sourceLabels: ["__name__"] + regex: "(.*)" + targetLabel: "saved_name" + replacement: "$1" + # The following basically retrofit the handler_latency_* metrics to label format. + - sourceLabels: ["__name__"] + regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)" + targetLabel: "server_type" + replacement: "$1" + - sourceLabels: ["__name__"] + regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)" + targetLabel: "service_type" + replacement: "$2" + - sourceLabels: ["__name__"] + regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?" + targetLabel: "service_method" + replacement: "$3" + - sourceLabels: ["__name__"] + regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?" 
+ targetLabel: "__name__" + replacement: "rpc_latency$4" + +resources: {} + +nodeSelector: {} + +affinity: {} + +statefulSetAnnotations: {} + +networkAnnotation: {} + +commonLabels: {} + +## @param dnsPolicy DNS Policy for pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsPolicy: ClusterFirst +dnsPolicy: "" +## @param dnsConfig DNS Configuration pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsConfig: +## options: +## - name: ndots +## value: "4" +dnsConfig: {} + + +master: + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core + ## This might override the default affinity from service.yaml + # To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernentes + # has. Each new node selector term is ORed together, and each match expression or match field in + # a single selector is ANDed together. + # This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value + # 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity + # terms. + # + # Pod anti affinity is a simpler merge. Each term is applied separately, and the weight is tracked. + # The pod that achieves the highest weight is selected. + ## Example. + # affinity: + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - "yb-master" + # topologyKey: kubernetes.io/hostname + # + # For further examples, see examples/yugabyte/affinity_overrides.yaml + affinity: {} + + ## Extra environment variables passed to the Master pods. 
+ ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core + ## Example: + # extraEnv: + # - name: NODE_IP + # valueFrom: + # fieldRef: + # fieldPath: status.hostIP + extraEnv: [] + + # secretEnv variables are used to expose secrets data as env variables in the master pod. + # TODO Add namespace also to support copying secrets from other namespace. + # secretEnv: + # - name: MYSQL_LDAP_PASSWORD + # valueFrom: + # secretKeyRef: + # name: secretName + # key: password + secretEnv: [] + + ## Annotations to be added to the Master pods. + podAnnotations: {} + + ## Labels to be added to the Master pods. + podLabels: {} + + ## Tolerations to be added to the Master pods. + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#toleration-v1-core + ## Example: + # tolerations: + # - key: dedicated + # operator: Equal + # value: experimental + # effect: NoSchedule + tolerations: [] + + ## Extra volumes + ## extraVolumesMounts are mandatory for each extraVolumes. + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core + ## Example: + # extraVolumes: + # - name: custom-nfs-vol + # persistentVolumeClaim: + # claimName: some-nfs-claim + extraVolumes: [] + + ## Extra volume mounts + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core + ## Example: + # extraVolumeMounts: + # - name: custom-nfs-vol + # mountPath: /home/yugabyte/nfs-backup + extraVolumeMounts: [] + + ## Set service account for master DB pods. The service account + ## should exist in the namespace where the master DB pods are brought up. + serviceAccount: "" + + ## Memory limit hard % (between 1-100) of the memory limit. 
+ memoryLimitHardPercentage: 85 + + ## Readiness Probe + readinessProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + + ## Custom readinessProbe that overrides the default one + ## Example: HTTP based Master readinessProbe + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 20 + # timeoutSeconds: 10 + # failureThreshold: 3 + # successThreshold: 1 + # httpGet: + # path: / + # port: 7000 + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes + customReadinessProbe: {} + + ## Startup probe + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 30 + successThreshold: 1 + + ## Custom startupProbe that overrides the default one + ## Enabling it will overrides the tserver.startupProbe + ## Example: HTTP based Master startupProbe + # customStartupProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # failureThreshold: 3 + # successThreshold: 1 + # httpGet: + # path: / + # port: 7000 + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes + customStartupProbe: {} + + +tserver: + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core + ## This might override the default affinity from service.yaml + # To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernentes + # has. Each new node selector term is ORed together, and each match expression or match field in + # a single selector is ANDed together. + # This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value + # 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity + # terms. + # + # Pod anti affinity is a simpler merge. 
Each term is applied separately, and the weight is tracked. + # The pod that achieves the highest weight is selected. + ## Example. + # affinity: + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - "yb-tserver" + # topologyKey: kubernetes.io/hostname + # For further examples, see examples/yugabyte/affinity_overrides.yaml + affinity: {} + + ## Extra environment variables passed to the TServer pods. + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core + ## Example: + # extraEnv: + # - name: NODE_IP + # valueFrom: + # fieldRef: + # fieldPath: status.hostIP + extraEnv: [] + + ## secretEnv variables are used to expose secrets data as env variables in the tserver pods. + ## If namespace field is not specified we assume that user already + ## created the secret in the same namespace as DB pods. + ## Example + # secretEnv: + # - name: MYSQL_LDAP_PASSWORD + # valueFrom: + # secretKeyRef: + # name: secretName + # namespace: my-other-namespace-with-ldap-secret + # key: password + secretEnv: [] + + ## Annotations to be added to the TServer pods. + podAnnotations: {} + + ## Labels to be added to the TServer pods. + podLabels: {} + + ## Tolerations to be added to the TServer pods. + ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#toleration-v1-core + ## Example: + # tolerations: + # - key: dedicated + # operator: Equal + # value: experimental + # effect: NoSchedule + tolerations: [] + + ## Sets the --server_broadcast_addresses flag on the TServer, no + ## preflight checks are done for this address. You might need to add + ## `use_private_ip: cloud` to the gflags.master and gflags.tserver. + serverBroadcastAddress: "" + + ## Extra volumes + ## extraVolumesMounts are mandatory for each extraVolumes. 
+  ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core
+  ## Example:
+  # extraVolumes:
+  #   - name: custom-nfs-vol
+  #     persistentVolumeClaim:
+  #       claimName: some-nfs-claim
+  extraVolumes: []
+
+  ## Extra volume mounts
+  ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core
+  ## Example:
+  # extraVolumeMounts:
+  #   - name: custom-nfs-vol
+  #     mountPath: /home/yugabyte/nfs-backup
+  extraVolumeMounts: []
+
+  ## Set service account for tserver DB pods. The service account
+  ## should exist in the namespace where the tserver DB pods are brought up.
+  serviceAccount: ""
+
+  ## Memory limit hard % (between 1-100) of the memory limit.
+  memoryLimitHardPercentage: 85
+
+  ## Readiness Probe
+  readinessProbe:
+    enabled: false
+    initialDelaySeconds: 30
+    periodSeconds: 20
+    timeoutSeconds: 10
+    failureThreshold: 3
+    successThreshold: 1
+
+  ## Custom readinessProbe that overrides the default one
+  ## Enabling it will override the tserver.readinessProbe
+  ## Example: HTTP based Tserver readinessProbe
+  # customReadinessProbe:
+  #   initialDelaySeconds: 30
+  #   periodSeconds: 20
+  #   timeoutSeconds: 10
+  #   httpGet:
+  #     path: /
+  #     port: 9000
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes
+  customReadinessProbe: {}
+
+  ## Startup probe
+  startupProbe:
+    enabled: false
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 30
+    successThreshold: 1
+
+  ## Custom startupProbe that overrides the default one
+  ## Enabling it will override the tserver.startupProbe
+  ## Example: HTTP based Tserver startupProbe
+  # customStartupProbe:
+  #   initialDelaySeconds: 30
+  #   periodSeconds: 10
+  #   timeoutSeconds: 5
+  #   failureThreshold: 3
+  #   successThreshold: 1
+  #   httpGet:
+  #     path: /
+  #     port: 9000
+  ## Ref:
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes + customStartupProbe: {} + + +helm2Legacy: false + +ip_version_support: "v4_only" # v4_only, v6_only are the only supported values at the moment + +# For more https://docs.yugabyte.com/latest/reference/configuration/yugabyted/#environment-variables +authCredentials: + ysql: + user: "yugabyte" + password: "yugabyte" + database: "yugabyte" + ycql: + user: "" + password: "" + keyspace: "" + +oldNamingStyle: false + +preflight: + # Set to true to skip disk IO check, DNS address resolution, and + # port bind checks + skipAll: false + # Set to true to skip port bind checks + skipBind: false + + ## Set to true to skip ulimit verification + ## SkipAll has higher priority + skipUlimit: false + +## Pod securityContext +## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context +## The following configuration runs YB-Master and YB-TServer as a non-root user +podSecurityContext: + enabled: false + ## Mark it false, if you want to stop the non root user validation + runAsNonRoot: true + fsGroup: 10001 + runAsUser: 10001 + runAsGroup: 10001 + +## Added to handle old universe which has volume annotations +## K8s universe <= 2.5 to >= 2.6 +legacyVolumeClaimAnnotations: false diff --git a/addons/yugabytedb/2024/chart/yugabytedb/yugabyte-rbac.yaml b/addons/yugabytedb/2024/chart/yugabytedb/yugabyte-rbac.yaml new file mode 100644 index 00000000..d021fbcb --- /dev/null +++ b/addons/yugabytedb/2024/chart/yugabytedb/yugabyte-rbac.yaml @@ -0,0 +1,19 @@ +# Create YugaByte specific service account +apiVersion: v1 +kind: ServiceAccount +metadata: + name: yugabyte-helm + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: yugabyte-helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + 
name: yugabyte-helm + namespace: kube-system diff --git a/addons/yugabytedb/2024/meta.yaml b/addons/yugabytedb/2024/meta.yaml new file mode 100644 index 00000000..d2f1f5c5 --- /dev/null +++ b/addons/yugabytedb/2024/meta.yaml @@ -0,0 +1,24 @@ +name: yugabytedb +version: 2024 +id: 1f8087cd-d210-44ba-ba84-344f752252b7 +description: "yugabytedb." +displayName: "yugabytedb" +metadata: + displayName: "yugabytedb" + provider: + name: drycc + supportURL: https://docs.yugabyte.com + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/yugabytedb +tags: yugabytedb +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" +- name: "service.type" + required: false + description: "service type config for values.yaml" +archive: false diff --git a/addons/yugabytedb/2024/plans/standard-1c2g3w10/bind.yaml b/addons/yugabytedb/2024/plans/standard-1c2g3w10/bind.yaml new file mode 100644 index 00000000..e1ea2b53 --- /dev/null +++ b/addons/yugabytedb/2024/plans/standard-1c2g3w10/bind.yaml @@ -0,0 +1,33 @@ +credential: + - name: TSERVER_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-tserver-service + jsonpath: '{ .spec.clusterIP }' + - name: TSERVER_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-tserver-service + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: UI_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yugabyted-ui-service + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: UI_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yugabyted-ui-service + jsonpath: '{ .spec.clusterIP }' + - name: MASTER_UI_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) 
}}-yb-master-ui
+        jsonpath: '{ .status.loadBalancer.ingress[*].ip }'
+  - name: MASTER_UI_HOST
+    valueFrom:
+      serviceRef:
+        name: {{ (include "yugabyte.fullname" .) }}-yb-master-ui
+        jsonpath: '{ .spec.clusterIP }'
+  - name: TSERVER_PORT
+    value: "5433"
\ No newline at end of file
diff --git a/addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json b/addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json
new file mode 100644
index 00000000..66ebbaa0
--- /dev/null
+++ b/addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json
@@ -0,0 +1,12 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "imagePullPolicy": {
+            "type": "string",
+            "enum": ["Always", "IfNotPresent", "Never"],
+            "default": "IfNotPresent",
+            "title": "Image pull policy"
+        }
+    }
+}
\ No newline at end of file
diff --git a/addons/yugabytedb/2024/plans/standard-1c2g3w10/meta.yaml b/addons/yugabytedb/2024/plans/standard-1c2g3w10/meta.yaml
new file mode 100644
index 00000000..549fa994
--- /dev/null
+++ b/addons/yugabytedb/2024/plans/standard-1c2g3w10/meta.yaml
@@ -0,0 +1,6 @@
+name: "standard-1c2g3w10"
+id: 2b65e52b-7d7f-4e41-83b8-9a53965ce314
+description: "YugabyteDB standard-1c2g3w10 plan which limits resources to 1 core 2G memory 3 replicas."
+displayName: "standard-1c2g3w10" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/yugabytedb/2024/plans/standard-1c2g3w10/values.yaml b/addons/yugabytedb/2024/plans/standard-1c2g3w10/values.yaml new file mode 100644 index 00000000..7b28893d --- /dev/null +++ b/addons/yugabytedb/2024/plans/standard-1c2g3w10/values.yaml @@ -0,0 +1,31 @@ +storage: + master: + count: 2 + size: 10Gi + tserver: + count: 2 + size: 10Gi + +resource: + master: + requests: + cpu: 2 + memory: 2Gi + limits: + cpu: 2 + memory: 2Gi + tserver: + requests: + cpu: 2 + memory: 4Gi + limits: + cpu: 2 + memory: 4Gi + +replicas: + master: 3 + tserver: 3 + +partition: + master: 3 + tserver: 3 \ No newline at end of file diff --git a/addons/yugabytedb/2024/plans/standard-2c4g3w20/bind.yaml b/addons/yugabytedb/2024/plans/standard-2c4g3w20/bind.yaml new file mode 100644 index 00000000..6f4da8e3 --- /dev/null +++ b/addons/yugabytedb/2024/plans/standard-2c4g3w20/bind.yaml @@ -0,0 +1,37 @@ +credential: + - name: TSERVER_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-tserver-service + jsonpath: '{ .spec.clusterIP }' + - name: TSERVER_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-tserver-service + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: TSERVER_PORT + value: "5433" + - name: UI_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yugabyted-ui-service + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: UI_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yugabyted-ui-service + jsonpath: '{ .spec.clusterIP }' + - name: UI_PORT + value: "15433" + - name: MASTER_UI_EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) }}-yb-master-ui + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: MASTER_UI_HOST + valueFrom: + serviceRef: + name: {{ (include "yugabyte.fullname" .) 
}}-yb-master-ui
+        jsonpath: '{ .spec.clusterIP }'
+  - name: MASTER_UI_PORT
+    value: "7000"
diff --git a/addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json b/addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json
new file mode 100644
index 00000000..66ebbaa0
--- /dev/null
+++ b/addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json
@@ -0,0 +1,12 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "imagePullPolicy": {
+            "type": "string",
+            "enum": ["Always", "IfNotPresent", "Never"],
+            "default": "IfNotPresent",
+            "title": "Image pull policy"
+        }
+    }
+}
\ No newline at end of file
diff --git a/addons/yugabytedb/2024/plans/standard-2c4g3w20/meta.yaml b/addons/yugabytedb/2024/plans/standard-2c4g3w20/meta.yaml
new file mode 100644
index 00000000..3036a2f9
--- /dev/null
+++ b/addons/yugabytedb/2024/plans/standard-2c4g3w20/meta.yaml
@@ -0,0 +1,6 @@
+name: "standard-2c4g3w20"
+id: 60a4ad4b-9243-4d2a-831d-c6a8b8a03985
+description: "YugabyteDB standard-2c4g3w20 plan which limits resources to 2 cores 4G memory 3 replicas."
+displayName: "standard-2c4g3w20" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/yugabytedb/2024/plans/standard-2c4g3w20/values.yaml b/addons/yugabytedb/2024/plans/standard-2c4g3w20/values.yaml new file mode 100644 index 00000000..65cea216 --- /dev/null +++ b/addons/yugabytedb/2024/plans/standard-2c4g3w20/values.yaml @@ -0,0 +1,31 @@ +storage: + master: + count: 2 + size: 20Gi + tserver: + count: 2 + size: 20Gi + +resource: + master: + requests: + cpu: 2 + memory: 2Gi + limits: + cpu: 2 + memory: 2Gi + tserver: + requests: + cpu: 2 + memory: 4Gi + limits: + cpu: 2 + memory: 4Gi + +replicas: + master: 3 + tserver: 3 + +partition: + master: 3 + tserver: 3 \ No newline at end of file From 880d52226337969084d44c99a675120ecb640c8d Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 11 Nov 2024 16:35:48 +0800 Subject: [PATCH 20/93] chore(redis): modify healthcheck timeoutSeconds --- addons/redis-cluster/7.0/chart/redis-cluster/values.yaml | 2 +- addons/redis/7.0/chart/redis/values.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml index bc946d3d..b66ee1e3 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml @@ -549,7 +549,7 @@ redis: enabled: true initialDelaySeconds: 5 periodSeconds: 5 - timeoutSeconds: 1 + timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 ## @param redis.startupProbe.enabled Enable startupProbe diff --git a/addons/redis/7.0/chart/redis/values.yaml b/addons/redis/7.0/chart/redis/values.yaml index 99759d05..6dcbef97 100644 --- a/addons/redis/7.0/chart/redis/values.yaml +++ b/addons/redis/7.0/chart/redis/values.yaml @@ -230,7 +230,7 @@ master: enabled: true initialDelaySeconds: 20 periodSeconds: 5 - timeoutSeconds: 1 + timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 ## @param 
master.customStartupProbe Custom startupProbe that overrides the default one @@ -599,7 +599,7 @@ replica: enabled: true initialDelaySeconds: 20 periodSeconds: 5 - timeoutSeconds: 1 + timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 ## @param replica.customStartupProbe Custom startupProbe that overrides the default one @@ -1028,7 +1028,7 @@ sentinel: enabled: true initialDelaySeconds: 20 periodSeconds: 5 - timeoutSeconds: 1 + timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one From 1544d17d000267f62046dd75e3e6721e5b1ec3d2 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 22 Nov 2024 10:47:29 +0800 Subject: [PATCH 21/93] chore(flink): modify flink plans --- addons/apollo/2.3/chart/apollo/.helmignore | 21 ++ addons/apollo/2.3/chart/apollo/1.yaml | 260 +++++++++++++++++ addons/apollo/2.3/chart/apollo/Chart.yaml | 25 ++ addons/apollo/2.3/chart/apollo/README.md | 0 .../2.3/chart/apollo/templates/NOTES.txt | 0 .../2.3/chart/apollo/templates/_helpers.tpl | 133 +++++++++ .../apollo/templates/adminservice/NOTES.txt | 32 ++ .../templates/adminservice/deployment.yaml | 88 ++++++ .../templates/adminservice/ingress.yaml | 63 ++++ .../apollo/templates/adminservice/secret.yaml | 20 ++ .../templates/adminservice/service.yaml | 22 ++ .../apollo/templates/configservice/NOTES.txt | 32 ++ .../templates/configservice/deployment.yaml | 88 ++++++ .../templates/configservice/ingress.yaml | 63 ++++ .../templates/configservice/secret.yaml | 22 ++ .../templates/configservice/service.yaml | 22 ++ .../chart/apollo/templates/portal/NOTES.txt | 25 ++ .../apollo/templates/portal/deployment.yaml | 102 +++++++ .../apollo/templates/portal/ingress.yaml | 64 ++++ .../chart/apollo/templates/portal/secret.yaml | 36 +++ .../apollo/templates/portal/service.yaml | 23 ++ addons/apollo/2.3/chart/apollo/values.yaml | 273 ++++++++++++++++++ addons/apollo/2.3/meta.yaml | 27 ++ 
.../2.3/plans/standard-1c2g2w/bind.yaml | 43 +++ .../create-instance-schema.json | 0 .../2.3/plans/standard-1c2g2w/meta.yaml | 6 + .../2.3/plans/standard-1c2g2w/values.yaml | 60 ++++ .../flink/1.17/plans/standard-2c4g/meta.yaml | 6 - .../bind.yaml | 0 .../create-instance-schema.json | 0 .../1.17/plans/standard-2c4g5w/meta.yaml | 6 + .../values.yaml | 4 +- .../flink/1.17/plans/standard-4c8g/meta.yaml | 6 - .../bind.yaml | 0 .../create-instance-schema.json | 12 + .../1.17/plans/standard-4c8g5w/meta.yaml | 6 + .../values.yaml | 4 +- 37 files changed, 1578 insertions(+), 16 deletions(-) create mode 100644 addons/apollo/2.3/chart/apollo/.helmignore create mode 100644 addons/apollo/2.3/chart/apollo/1.yaml create mode 100644 addons/apollo/2.3/chart/apollo/Chart.yaml create mode 100644 addons/apollo/2.3/chart/apollo/README.md create mode 100644 addons/apollo/2.3/chart/apollo/templates/NOTES.txt create mode 100644 addons/apollo/2.3/chart/apollo/templates/_helpers.tpl create mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt create mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt create mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt create mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml 
create mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml create mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/service.yaml create mode 100644 addons/apollo/2.3/chart/apollo/values.yaml create mode 100644 addons/apollo/2.3/meta.yaml create mode 100644 addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml rename addons/{flink/1.17/plans/standard-2c4g => apollo/2.3/plans/standard-1c2g2w}/create-instance-schema.json (100%) create mode 100644 addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml create mode 100644 addons/apollo/2.3/plans/standard-1c2g2w/values.yaml delete mode 100644 addons/flink/1.17/plans/standard-2c4g/meta.yaml rename addons/flink/1.17/plans/{standard-2c4g => standard-2c4g5w}/bind.yaml (100%) rename addons/flink/1.17/plans/{standard-4c8g => standard-2c4g5w}/create-instance-schema.json (100%) create mode 100644 addons/flink/1.17/plans/standard-2c4g5w/meta.yaml rename addons/flink/1.17/plans/{standard-2c4g => standard-2c4g5w}/values.yaml (97%) delete mode 100644 addons/flink/1.17/plans/standard-4c8g/meta.yaml rename addons/flink/1.17/plans/{standard-4c8g => standard-4c8g5w}/bind.yaml (100%) create mode 100644 addons/flink/1.17/plans/standard-4c8g5w/create-instance-schema.json create mode 100644 addons/flink/1.17/plans/standard-4c8g5w/meta.yaml rename addons/flink/1.17/plans/{standard-4c8g => standard-4c8g5w}/values.yaml (97%) diff --git a/addons/apollo/2.3/chart/apollo/.helmignore b/addons/apollo/2.3/chart/apollo/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/addons/apollo/2.3/chart/apollo/1.yaml b/addons/apollo/2.3/chart/apollo/1.yaml new file mode 100644 index 00000000..c6edc2b8 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/1.yaml @@ -0,0 +1,260 @@ +--- +# Source: apollo/templates/adminservice/secret.yaml +kind: Secret +apiVersion: v1 +metadata: + name: release-name-apollo-adminservice +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 + spring.datasource.username = 1 + spring.datasource.password = 1 +--- +# Source: apollo/templates/portal/secret.yaml +kind: Secret +apiVersion: v1 +metadata: + name: release-name-apollo-portal +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://:3306/ApolloPortalDB?characterEncoding=utf8 + spring.datasource.username = 1 + spring.datasource.password = 1 + apollo.portal.envs = dev + apollo-env.properties: | + dev = release-name-apollo-configservice:8080 +--- +# Source: apollo/templates/configservice/secret.yaml +kind: ConfigMap +apiVersion: v1 +metadata: + name: release-name-apollo-configservice +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 + spring.datasource.username = 1 + spring.datasource.password = 1 + apollo.config-service.url = http://release-name-apollo-configservice.default:8080 + apollo.admin-service.url = http://release-name-apollo-adminservice.default:8090 +--- +# Source: apollo/templates/adminservice/service.yaml +kind: Service +apiVersion: v1 +metadata: + name: release-name-apollo-adminservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 8090 + targetPort: 8090 + selector: + app: release-name-apollo-adminservice +--- +# 
Source: apollo/templates/configservice/service.yaml +kind: Service +apiVersion: v1 +metadata: + name: release-name-apollo-configservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + selector: + app: release-name-apollo-configservice +--- +# Source: apollo/templates/portal/service.yaml +kind: Service +apiVersion: v1 +metadata: + name: release-name-apollo-portal + labels: + app.kubernetes.io/version: "2.3.0" +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 8070 + targetPort: 8070 + selector: + app: release-name-apollo-portal + sessionAffinity: ClientIP +--- +# Source: apollo/templates/adminservice/deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + name: release-name-apollo-adminservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + replicas: 2 + selector: + matchLabels: + app: release-name-apollo-adminservice + template: + metadata: + labels: + app: release-name-apollo-adminservice + spec: + volumes: + - name: volume-configmap-release-name-apollo-adminservice + configMap: + name: release-name-apollo-adminservice + items: + - key: application-github.properties + path: application-github.properties + defaultMode: 420 + containers: + - name: apollo-adminservice + image: "drycc-addons/apollo-adminservice:2.3" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8090 + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: "github,kubernetes" + volumeMounts: + - name: volume-configmap-release-name-apollo-adminservice + mountPath: /apollo-adminservice/config/application-github.properties + subPath: application-github.properties + livenessProbe: + tcpSocket: + port: 8090 + initialDelaySeconds: 100 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 8090 + initialDelaySeconds: 30 + periodSeconds: 5 + resources: + {} +--- +# Source: apollo/templates/configservice/deployment.yaml 
+kind: Deployment +apiVersion: apps/v1 +metadata: + name: release-name-apollo-configservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + replicas: 2 + selector: + matchLabels: + app: release-name-apollo-configservice + template: + metadata: + labels: + app: release-name-apollo-configservice + spec: + volumes: + - name: volume-configmap-release-name-apollo-configservice + configMap: + name: release-name-apollo-configservice + items: + - key: application-github.properties + path: application-github.properties + defaultMode: 420 + containers: + - name: apollo-configservice + image: "drycc-addons/apollo-configservice:2.3" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: "github,kubernetes" + volumeMounts: + - name: volume-configmap-release-name-apollo-configservice + mountPath: /apollo-configservice/config/application-github.properties + subPath: application-github.properties + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 100 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 5 + resources: + {} +--- +# Source: apollo/templates/portal/deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + name: release-name-apollo-portal + labels: + app.kubernetes.io/version: "2.3.0" +spec: + replicas: 1 + selector: + matchLabels: + app: release-name-apollo-portal + template: + metadata: + labels: + app: release-name-apollo-portal + spec: + volumes: + - name: secret-release-name-apollo-portal + Secret: + name: release-name-apollo-portal + items: + - key: application-github.properties + path: application-github.properties + - key: apollo-env.properties + path: apollo-env.properties + defaultMode: 420 + containers: + - name: apollo-portal + image: "drycc-addons/apollo-portal:2.3" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8070 + protocol: TCP + env: + - 
name: SPRING_PROFILES_ACTIVE + value: "github,auth" + volumeMounts: + - name: secret-release-name-apollo-portal + mountPath: /apollo-portal/config/application-github.properties + subPath: application-github.properties + - name: secret-release-name-apollo-portal + mountPath: /apollo-portal/config/apollo-env.properties + subPath: apollo-env.properties + livenessProbe: + tcpSocket: + port: 8070 + initialDelaySeconds: 100 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 8070 + initialDelaySeconds: 30 + periodSeconds: 5 + resources: + {} +--- +# Source: apollo/templates/portal/ingress.yaml +# diff --git a/addons/apollo/2.3/chart/apollo/Chart.yaml b/addons/apollo/2.3/chart/apollo/Chart.yaml new file mode 100644 index 00000000..07bdb346 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/Chart.yaml @@ -0,0 +1,25 @@ +# Copyright Drycc Community. +# SPDX-License-Identifier: APACHE-2.0 + +annotations: + category: config + licenses: Apache-2.0 +apiVersion: v2 +appVersion: "2.3.0" +dependencies: +- name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.3 +description: A Helm chart for Apollo Config Service and Apollo Admin Service +home: https://github.com/apolloconfig/apollo +icon: https://raw.githubusercontent.com/apolloconfig/apollo/master/apollo-portal/src/main/resources/static/img/logo-simple.png +keywords: +- apollo +- apolloconfig +maintainers: +- name: Drycc Community. 
+ url: https://github.com/drycc-addons/addons +name: apollo +sources: +- https://github.com/drycc-addons/addons +version: 0.1.0 diff --git a/addons/apollo/2.3/chart/apollo/README.md b/addons/apollo/2.3/chart/apollo/README.md new file mode 100644 index 00000000..e69de29b diff --git a/addons/apollo/2.3/chart/apollo/templates/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/NOTES.txt new file mode 100644 index 00000000..e69de29b diff --git a/addons/apollo/2.3/chart/apollo/templates/_helpers.tpl b/addons/apollo/2.3/chart/apollo/templates/_helpers.tpl new file mode 100644 index 00000000..6baef133 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/_helpers.tpl @@ -0,0 +1,133 @@ +{{/* vim: set filetype=mustache: */}} + + +{{/* +Full name for portal service +*/}} +{{- define "apollo.portal.fullName" -}} +{{- if .Values.portal.fullNameOverride -}} +{{- .Values.portal.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if contains .Values.portal.name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name .Values.portal.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "apollo.labels" -}} +{{- if .Chart.AppVersion -}} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- end -}} + +{{/* +Service name for portal +*/}} +{{- define "apollo.portal.serviceName" -}} +{{- if .Values.portal.service.fullNameOverride -}} +{{- .Values.portal.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{ include "apollo.portal.fullName" .}} +{{- end -}} +{{- end -}} + + +{{/* vim: set filetype=mustache: */}} + +{{/* +Service name for configdb +*/}} +{{- define "apollo.configdb.serviceName" -}} +{{- .Values.apolloService.configdb.host -}} +{{- end -}} + +{{/* +Service port for configdb +*/}} +{{- define "apollo.configdb.servicePort" -}} +{{- if 
.Values.apolloService.configdb.service.enabled -}} +{{- .Values.apolloService.configdb.service.port -}} +{{- else -}} +{{- .Values.apolloService.configdb.port -}} +{{- end -}} +{{- end -}} + +{{/* +Full name for config service +*/}} +{{- define "apollo.configService.fullName" -}} +{{- if .Values.apolloService.configService.fullNameOverride -}} +{{- .Values.apolloService.configService.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if contains .Values.apolloService.configService.name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name .Values.apolloService.configService.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Service name for config service +*/}} +{{- define "apollo.configService.serviceName" -}} +{{- if .Values.apolloService.configService.service.fullNameOverride -}} +{{- .Values.apolloService.configService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{ include "apollo.configService.fullName" .}} +{{- end -}} +{{- end -}} + +{{/* +Config service url to be accessed by apollo-client +*/}} +{{- define "apollo.configService.serviceUrl" -}} +{{- if .Values.apolloService.configService.config.configServiceUrlOverride -}} +{{ .Values.apolloService.configService.config.configServiceUrlOverride }} +{{- else -}} +http://{{ include "apollo.configService.serviceName" .}}.{{ .Release.Namespace }}:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} +{{- end -}} +{{- end -}} + +{{/* +Full name for admin service +*/}} +{{- define "apollo.adminService.fullName" -}} +{{- if .Values.apolloService.adminService.fullNameOverride -}} +{{- .Values.apolloService.adminService.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if contains .Values.apolloService.adminService.name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} 
+{{- printf "%s-%s" .Release.Name .Values.apolloService.adminService.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Service name for admin service +*/}} +{{- define "apollo.adminService.serviceName" -}} +{{- if .Values.apolloService.adminService.service.fullNameOverride -}} +{{- .Values.apolloService.adminService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{ include "apollo.adminService.fullName" .}} +{{- end -}} +{{- end -}} + +{{/* +Admin service url to be accessed by apollo-portal +*/}} +{{- define "apollo.adminService.serviceUrl" -}} +{{- if .Values.apolloService.configService.config.adminServiceUrlOverride -}} +{{ .Values.apolloService.configService.config.adminServiceUrlOverride -}} +{{- else -}} +http://{{ include "apollo.adminService.serviceName" .}}.{{ .Release.Namespace }}:{{ .Values.apolloService.adminService.service.port }}{{ .Values.apolloService.adminService.config.contextPath }} +{{- end -}} +{{- end -}} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt new file mode 100644 index 00000000..78ce9341 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt @@ -0,0 +1,32 @@ +Meta service url for current release: +{{- if contains "NodePort" .Values.apolloService.configService.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} + echo {{ include "apollo.configService.serviceUrl" .}} + +For local test use: + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 +{{- end }} + +{{- if .Values.apolloService.configService.ingress.enabled }} + +Ingress: +{{- range $host := .Values.apolloService.configService.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- end }} + +Urls registered to meta service: +Config service: {{ include "apollo.configService.serviceUrl" .}} +Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml new file mode 100644 index 00000000..d115a3c3 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- $adminServiceFullName := include "apollo.adminService.fullName" . 
}} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ $adminServiceFullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.apolloService.adminService.replicaCount }} + selector: + matchLabels: + app: {{ $adminServiceFullName }} + {{- with .Values.apolloService.adminService.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ $adminServiceFullName }} + {{- with .Values.apolloService.adminService.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.apolloService.adminService.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: volume-configmap-{{ $adminServiceFullName }} + configMap: + name: {{ $adminServiceFullName }} + items: + - key: application-github.properties + path: application-github.properties + defaultMode: 420 + containers: + - name: {{ .Values.apolloService.adminService.name }} + image: "{{ .Values.apolloService.adminService.image.repository }}:{{ .Values.apolloService.adminService.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.apolloService.adminService.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.apolloService.adminService.containerPort }} + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: {{ .Values.apolloService.adminService.config.profiles | quote }} + {{- range $key, $value := .Values.apolloService.adminService.env }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + volumeMounts: + - name: volume-configmap-{{ $adminServiceFullName }} + mountPath: /apollo-adminservice/config/application-github.properties + subPath: application-github.properties + livenessProbe: + tcpSocket: + port: {{ .Values.apolloService.adminService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.adminService.liveness.initialDelaySeconds }} + periodSeconds: {{ 
.Values.apolloService.adminService.liveness.periodSeconds }} + readinessProbe: + httpGet: + path: {{ .Values.apolloService.adminService.config.contextPath }}/health + port: {{ .Values.apolloService.adminService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.adminService.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.apolloService.adminService.readiness.periodSeconds }} + resources: + {{- toYaml .Values.apolloService.adminService.resources | nindent 12 }} + {{- with .Values.apolloService.adminService.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.apolloService.adminService.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.apolloService.adminService.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml new file mode 100644 index 00000000..1f5efcdf --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml @@ -0,0 +1,63 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- if .Values.apolloService.adminService.ingress.enabled -}} +{{- $fullName := include "apollo.adminService.fullName" . -}} +{{- $svcPort := .Values.apolloService.adminService.service.port -}} +{{- $apiVersion := "extensions/v1beta1" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1" }} +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1beta1" }} +{{- end }} +apiVersion: {{ $apiVersion }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "apollo.labels" . 
| nindent 4 }} + {{- with .Values.apolloService.adminService.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.apolloService.adminService.ingress.ingressClassName }} + ingressClassName: {{ .Values.apolloService.adminService.ingress.ingressClassName }} +{{- end }} +{{- if .Values.apolloService.adminService.ingress.tls }} + tls: + {{- range .Values.apolloService.adminService.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.apolloService.adminService.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if eq $apiVersion "networking.k8s.io/v1" }} + pathType: Prefix + {{- end }} + backend: + {{- if eq $apiVersion "networking.k8s.io/v1" }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml new file mode 100644 index 00000000..d7f35e89 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- $adminServiceFullName := include "apollo.adminService.fullName" . 
}}
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: {{ $adminServiceFullName }}
+data:
+  application-github.properties: |
+    spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }}
+    spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }}
+    spring.datasource.password = {{ required "configdb.password is required!" .Values.apolloService.configdb.password }}
+    {{- if .Values.apolloService.adminService.config.contextPath }}
+    server.servlet.context-path = {{ .Values.apolloService.adminService.config.contextPath }}
+    {{- end }}
+{{- end }}
diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml
new file mode 100644
index 00000000..ff44f6cc
--- /dev/null
+++ b/addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml
+{{- /*
+Copyright Drycc Community.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.apolloService.enabled }}
+kind: Service
+apiVersion: v1
+metadata:
+  name: {{ include "apollo.adminService.serviceName" . }}
+  labels:
+    {{- include "apollo.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.apolloService.adminService.service.type }}
+  ports:
+    - name: http
+      protocol: TCP
+      port: {{ .Values.apolloService.adminService.service.port }}
+      targetPort: {{ .Values.apolloService.adminService.service.targetPort }}
+  selector:
+    app: {{ include "apollo.adminService.fullName" . 
}} +{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt new file mode 100644 index 00000000..78ce9341 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt @@ -0,0 +1,32 @@ +Meta service url for current release: +{{- if contains "NodePort" .Values.apolloService.configService.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} + echo {{ include "apollo.configService.serviceUrl" .}} + +For local test use: + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 +{{- end }} + +{{- if .Values.apolloService.configService.ingress.enabled }} + +Ingress: +{{- range $host := .Values.apolloService.configService.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- end }} + +Urls registered to meta service: +Config service: {{ include "apollo.configService.serviceUrl" .}} +Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml new file mode 100644 index 00000000..947e8eb9 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- $configServiceFullName := include "apollo.configService.fullName" . }} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ $configServiceFullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.apolloService.configService.replicaCount }} + selector: + matchLabels: + app: {{ $configServiceFullName }} + {{- with .Values.apolloService.configService.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ $configServiceFullName }} + {{- with .Values.apolloService.configService.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.apolloService.configService.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: volume-configmap-{{ $configServiceFullName }} + configMap: + name: {{ $configServiceFullName }} + items: + - key: application-github.properties + path: application-github.properties + defaultMode: 420 + containers: + - name: {{ .Values.apolloService.configService.name }} + image: "{{ .Values.apolloService.configService.image.repository }}:{{ .Values.apolloService.configService.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.apolloService.configService.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.apolloService.configService.containerPort }} + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: {{ .Values.apolloService.configService.config.profiles | quote }} + {{- range $key, $value := .Values.apolloService.configService.env }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + volumeMounts: + - name: volume-configmap-{{ $configServiceFullName }} + mountPath: /apollo-configservice/config/application-github.properties + subPath: application-github.properties + livenessProbe: + tcpSocket: + port: {{ .Values.apolloService.configService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.configService.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.apolloService.configService.liveness.periodSeconds }} + readinessProbe: + httpGet: + path: {{ .Values.apolloService.configService.config.contextPath }}/health + port: {{ .Values.apolloService.configService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.configService.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.apolloService.configService.readiness.periodSeconds }} + resources: + {{- toYaml .Values.apolloService.configService.resources | nindent 12 }} + {{- with .Values.apolloService.configService.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.apolloService.configService.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.apolloService.configService.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml new file mode 100644 index 00000000..36fc5421 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml @@ -0,0 +1,63 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- if .Values.apolloService.configService.ingress.enabled -}} +{{- $fullName := include "apollo.configService.fullName" . -}} +{{- $svcPort := .Values.apolloService.configService.service.port -}} +{{- $apiVersion := "extensions/v1beta1" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1" }} +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1beta1" }} +{{- end }} +apiVersion: {{ $apiVersion }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} + {{- with .Values.apolloService.configService.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.apolloService.configService.ingress.ingressClassName }} + ingressClassName: {{ .Values.apolloService.configService.ingress.ingressClassName }} +{{- end }} +{{- if .Values.apolloService.configService.ingress.tls }} + tls: + {{- range .Values.apolloService.configService.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.apolloService.configService.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if eq $apiVersion "networking.k8s.io/v1" }} + pathType: Prefix + {{- end }} + backend: + {{- if eq $apiVersion "networking.k8s.io/v1" }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml new file mode 100644 index 00000000..5224b51d --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml @@ -0,0 +1,22 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- $configServiceFullName := include "apollo.configService.fullName" . }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ $configServiceFullName }} +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }} + spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }} + spring.datasource.password = {{ required "configdb.password is required!" 
.Values.apolloService.configdb.password }} + apollo.config-service.url = {{ include "apollo.configService.serviceUrl" .}} + apollo.admin-service.url = {{ include "apollo.adminService.serviceUrl" .}} + {{- if .Values.apolloService.configService.config.contextPath }} + server.servlet.context-path = {{ .Values.apolloService.configService.config.contextPath }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml new file mode 100644 index 00000000..9bcbb5e1 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml @@ -0,0 +1,22 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "apollo.configService.serviceName" . }} + labels: + {{- include "apollo.labels" . | nindent 4 }} +spec: + type: {{ .Values.apolloService.configService.service.type }} + ports: + - name: http + protocol: TCP + port: {{ .Values.apolloService.configService.service.port }} + targetPort: {{ .Values.apolloService.configService.service.targetPort }} + selector: + app: {{ include "apollo.configService.fullName" . }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt new file mode 100644 index 00000000..d49cfe0e --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt @@ -0,0 +1,25 @@ +Portal url for current release: +{{- if contains "NodePort" .Values.portal.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.portal.fullName" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.portal.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.portal.fullName" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.portal.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.portal.service.port }} +{{- else if contains "ClusterIP" .Values.portal.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.portal.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8070 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8070:8070 +{{- end }} + +{{- if .Values.portal.ingress.enabled }} + +Ingress: +{{- range $host := .Values.portal.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.portal.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml new file mode 100644 index 00000000..7c86b3e7 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml @@ -0,0 +1,102 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.portal.enabled }} +{{- $portalFullName := include "apollo.portal.fullName" . }} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ $portalFullName }} + labels: + {{- include "apollo.labels" . 
| nindent 4 }}
+spec:
+  replicas: {{ .Values.portal.replicaCount }}
+  selector:
+    matchLabels:
+      app: {{ $portalFullName }}
+  {{- with .Values.portal.strategy }}
+  strategy:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  template:
+    metadata:
+      labels:
+        app: {{ $portalFullName }}
+      {{- with .Values.portal.annotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+      {{- with .Values.portal.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      volumes:
+        - name: secret-{{ $portalFullName }}
+          secret:
+            secretName: {{ $portalFullName }}
+            items:
+              - key: application-github.properties
+                path: application-github.properties
+              - key: apollo-env.properties
+                path: apollo-env.properties
+              {{- range $fileName, $content := .Values.portal.config.files }}
+              - key: {{ $fileName }}
+                path: {{ $fileName }}
+              {{- end }}
+            defaultMode: 420
+      containers:
+        - name: {{ .Values.portal.name }}
+          image: "{{ .Values.portal.image.repository }}:{{ .Values.portal.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.portal.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.portal.containerPort }}
+              protocol: TCP
+          env:
+            - name: SPRING_PROFILES_ACTIVE
+              value: {{ .Values.portal.config.profiles | quote }}
+          {{- range $key, $value := .Values.portal.env }}
+            - name: {{ $key }}
+              value: {{ $value }}
+          {{- end }}
+          volumeMounts:
+            - name: secret-{{ $portalFullName }}
+              mountPath: /apollo-portal/config/application-github.properties
+              subPath: application-github.properties
+            - name: secret-{{ $portalFullName }}
+              mountPath: /apollo-portal/config/apollo-env.properties
+              subPath: apollo-env.properties
+            {{- range $fileName, $content := .Values.portal.config.files }}
+            - name: secret-{{ $portalFullName }}
+              mountPath: /apollo-portal/config/{{ $fileName }}
+              subPath: {{ $fileName }}
+            {{- end }}
+          livenessProbe:
+            tcpSocket:
+              port: {{ .Values.portal.containerPort }}
+            initialDelaySeconds: {{ 
.Values.portal.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.portal.liveness.periodSeconds }} + readinessProbe: + httpGet: + path: {{ .Values.portal.config.contextPath }}/health + port: {{ .Values.portal.containerPort }} + initialDelaySeconds: {{ .Values.portal.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.portal.readiness.periodSeconds }} + resources: + {{- toYaml .Values.portal.resources | nindent 12 }} + {{- with .Values.portal.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.portal.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.portal.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml new file mode 100644 index 00000000..b01dc05b --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml @@ -0,0 +1,64 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.portal.enabled }} +# +{{- if .Values.portal.ingress.enabled -}} +{{- $fullName := include "apollo.portal.fullName" . -}} +{{- $svcPort := .Values.portal.service.port -}} +{{- $apiVersion := "extensions/v1beta1" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1" }} +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1beta1" }} +{{- end }} +apiVersion: {{ $apiVersion }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} + {{- with .Values.portal.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }}
+  {{- end }}
+spec:
+{{- if .Values.portal.ingress.ingressClassName }}
+  ingressClassName: {{ .Values.portal.ingress.ingressClassName }}
+{{- end }}
+{{- if .Values.portal.ingress.tls }}
+  tls:
+    {{- range .Values.portal.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+{{- end }}
+  rules:
+    {{- range .Values.portal.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ . }}
+            {{- if eq $apiVersion "networking.k8s.io/v1" }}
+            pathType: Prefix
+            {{- end }}
+            backend:
+              {{- if eq $apiVersion "networking.k8s.io/v1" }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
+{{- end }}
diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml
new file mode 100644
index 00000000..32e8cfbb
--- /dev/null
+++ b/addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml
+{{- /*
+Copyright Drycc Community.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.portal.enabled }}
+{{- $portalFullName := include "apollo.portal.fullName" . }}
+kind: Secret
+apiVersion: v1
+metadata:
+  name: {{ $portalFullName }}
+stringData:
+  application-github.properties: |
+    spring.datasource.url = jdbc:mysql://{{ .Values.portal.portaldb.host }}:{{ .Values.portal.portaldb.port }}/{{ .Values.portal.portaldb.dbName }}{{ if .Values.portal.portaldb.connectionStringProperties }}?{{ .Values.portal.portaldb.connectionStringProperties }}{{ end }}
+    spring.datasource.username = {{ required "portaldb.userName is required!" .Values.portal.portaldb.userName }}
+    spring.datasource.password = {{ required "portaldb.password is required!" 
.Values.portal.portaldb.password }} + {{- if .Values.portal.config.envs }} + apollo.portal.envs = {{ .Values.portal.config.envs }} + {{- end }} + {{- if .Values.portal.config.contextPath }} + server.servlet.context-path = {{ .Values.portal.config.contextPath }} + {{- end }} + apollo-env.properties: | + {{- if .Values.apolloService.enabled }} + {{ .Values.apolloService.meta }} = {{ include "apollo.configService.serviceName" . }}:{{ .Values.apolloService.configService.service.port }} + {{- end }} + {{- if .Values.portal.config.metaServers }} + {{- range $env, $address := .Values.portal.config.metaServers }} + {{ $env }}.meta = {{ $address }} + {{- end }} + {{- end }} +{{- range $fileName, $content := .Values.portal.config.files }} +{{ $fileName | indent 2 }}: | +{{ $content | indent 4 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/service.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/service.yaml new file mode 100644 index 00000000..da8237d5 --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/templates/portal/service.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.portal.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "apollo.portal.serviceName" . }} + labels: + {{- include "apollo.labels" . | nindent 4 }} +spec: + type: {{ .Values.portal.service.type }} + ports: + - name: http + protocol: TCP + port: {{ .Values.portal.service.port }} + targetPort: {{ .Values.portal.service.targetPort }} + selector: + app: {{ include "apollo.portal.fullName" . }} + sessionAffinity: {{ .Values.portal.service.sessionAffinity }} +{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/values.yaml b/addons/apollo/2.3/chart/apollo/values.yaml new file mode 100644 index 00000000..7a00ff0e --- /dev/null +++ b/addons/apollo/2.3/chart/apollo/values.yaml @@ -0,0 +1,273 @@ +# Copyright Drycc Community. 
+# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +portal: + enabled: true + name: apollo-portal + fullNameOverride: "" + replicaCount: 1 
+ containerPort: 8070 + image: + registry: registry.drycc.cc + repository: drycc-addons/apollo-portal + tag: "2.3" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + service: + fullNameOverride: "" + port: 8070 + targetPort: 8070 + type: ClusterIP + sessionAffinity: ClientIP + ingress: + ingressClassName: null + enabled: false + annotations: {} + hosts: + - host: "" + paths: [] + tls: [] + liveness: + initialDelaySeconds: 100 + periodSeconds: 10 + readiness: + initialDelaySeconds: 30 + periodSeconds: 5 + # environment variables passed to the container, e.g. JAVA_OPTS + env: {} + strategy: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + annotations: {} + + config: + # spring profiles to activate + profiles: "github,auth" + # specify the env names, e.g. dev,pro + envs: "dev" + # specify the meta servers, e.g. + # dev: http://apollo-configservice-dev:8080 + # pro: http://apollo-configservice-pro:8080 + metaServers: "" + # specify the context path, e.g. /apollo + contextPath: "" + # extra config files for apollo-portal, e.g. 
application-ldap.yml + files: {} + + portaldb: + # apolloportaldb host + host: "" + port: 3306 + dbName: ApolloPortalDB + # apolloportaldb user name + userName: "1" + # apolloportaldb password + password: "1" + connectionStringProperties: characterEncoding=utf8 + + +## @section Apollo parameters + +apolloService: + enabled: true + meta: "dev" + configdb: + # apolloconfigdb host + host: "" + port: 3306 + dbName: ApolloConfigDB + # apolloconfigdb user name + userName: "1" + # apolloconfigdb password + password: "1" + connectionStringProperties: characterEncoding=utf8 + + configService: + name: apollo-configservice + fullNameOverride: "" + replicaCount: 2 + containerPort: 8080 + image: + repository: drycc-addons/apollo-configservice + tag: "2.3" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + service: + fullNameOverride: "" + port: 8080 + targetPort: 8080 + type: ClusterIP + ingress: + ingressClassName: null + enabled: false + annotations: { } + hosts: + - host: "" + paths: [ ] + tls: [ ] + liveness: + initialDelaySeconds: 100 + periodSeconds: 10 + readiness: + initialDelaySeconds: 30 + periodSeconds: 5 + config: + # spring profiles to activate + profiles: "github,kubernetes" + # override apollo.config-service.url: config service url to be accessed by apollo-client + configServiceUrlOverride: "" + # override apollo.admin-service.url: admin service url to be accessed by apollo-portal + adminServiceUrlOverride: "" + # specify the context path, e.g. 
/apollo + contextPath: "" + # environment variables passed to the container, e.g. JAVA_OPTS + env: {} + strategy: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + annotations: {} + + adminService: + name: apollo-adminservice + fullNameOverride: "" + replicaCount: 2 + containerPort: 8090 + image: + registry: registry.drycc.cc + repository: drycc-addons/apollo-adminservice + tag: "2.3" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + service: + fullNameOverride: "" + port: 8090 + targetPort: 8090 + type: ClusterIP + ingress: + ingressClassName: null + enabled: false + annotations: { } + hosts: + - host: "" + paths: [ ] + tls: [ ] + liveness: + initialDelaySeconds: 100 + periodSeconds: 10 + readiness: + initialDelaySeconds: 30 + periodSeconds: 5 + config: + # spring profiles to activate + profiles: "github,kubernetes" + # specify the context path, e.g. /apollo + contextPath: "" + # environment variables passed to the container, e.g. JAVA_OPTS + env: {} + strategy: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + annotations: {} + diff --git a/addons/apollo/2.3/meta.yaml b/addons/apollo/2.3/meta.yaml new file mode 100644 index 00000000..edb96d83 --- /dev/null +++ b/addons/apollo/2.3/meta.yaml @@ -0,0 +1,27 @@ +name: apollo +version: 2 +id: 06653a76-126d-4c9d-a929-e4841185ab68 +description: "apollo." 
+displayName: "apollo" +metadata: + displayName: "apollo" + provider: + name: drycc + supportURL: https://www.apolloconfig.com/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/apollo-adminservice +tags: apollo +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "portal.enabled" + required: false + description: "portal.enabled config for values.yaml" +- name: "portal.config" + required: false + description: "portal.config config for values.yaml" +- name: "portal.portaldb" + required: false + description: "portal.config config for values.yaml" +archive: false diff --git a/addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml b/addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml new file mode 100644 index 00000000..e37ddd93 --- /dev/null +++ b/addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/flink/1.17/plans/standard-2c4g/create-instance-schema.json b/addons/apollo/2.3/plans/standard-1c2g2w/create-instance-schema.json similarity index 100% rename from addons/flink/1.17/plans/standard-2c4g/create-instance-schema.json rename to addons/apollo/2.3/plans/standard-1c2g2w/create-instance-schema.json diff --git a/addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml b/addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml new file mode 100644 index 00000000..a10be35d --- /dev/null +++ b/addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c2g2w" +id: 75f949c8-8366-4805-aa8b-553de0ec6c24 +description: "airflow standard-1c2g2w plan which limit resources 2 workers per worker 1 core memory size 2Gi." +displayName: "standard-1c2g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/apollo/2.3/plans/standard-1c2g2w/values.yaml b/addons/apollo/2.3/plans/standard-1c2g2w/values.yaml new file mode 100644 index 00000000..caeaba82 --- /dev/null +++ b/addons/apollo/2.3/plans/standard-1c2g2w/values.yaml @@ -0,0 +1,60 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-1c2g2w + +## @section Airflow web parameters + +web: + ## @param web.replicaCount Number of Airflow web replicas + ## + replicaCount: 1 + ## Airflow web resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param web.resources.limits The resources limits for the Airflow web containers + ## @param web.resources.requests The requested resources for the Airflow web containers + ## + resources: + limits: + cpu: 1000m + memory: 2048Mi + requests: + cpu: 500m + memory: 1024Mi + +## @section Airflow scheduler parameters + +scheduler: + ## @param scheduler.replicaCount Number of scheduler replicas + ## + replicaCount: 1 + ## Airflow scheduler resource requests and limits + ## 
ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param scheduler.resources.limits The resources limits for the Airflow scheduler containers + ## @param scheduler.resources.requests The requested resources for the Airflow scheduler containers + ## + resources: + limits: + cpu: 1000m + memory: 2048Mi + requests: + cpu: 500m + memory: 1024Mi + +## @section Airflow worker parameters + +worker: + ## @param worker.replicaCount Number of Airflow worker replicas + ## + replicaCount: 2 + ## Airflow worker resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param worker.resources.limits The resources limits for the Airflow worker containers + ## @param worker.resources.requests The requested resources for the Airflow worker containers + ## + resources: + limits: + cpu: 1000m + memory: 2048Mi + requests: + cpu: 500m + memory: 1024Mi diff --git a/addons/flink/1.17/plans/standard-2c4g/meta.yaml b/addons/flink/1.17/plans/standard-2c4g/meta.yaml deleted file mode 100644 index 8f5bb4dc..00000000 --- a/addons/flink/1.17/plans/standard-2c4g/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-2c4g" -id: 46148cab-d613-4a08-88c9-16dc0422eadb -description: "flink standard-2c4g plan which limit resources 2 cores 4G memory." 
-displayName: "standard-2c4g" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/flink/1.17/plans/standard-2c4g/bind.yaml b/addons/flink/1.17/plans/standard-2c4g5w/bind.yaml similarity index 100% rename from addons/flink/1.17/plans/standard-2c4g/bind.yaml rename to addons/flink/1.17/plans/standard-2c4g5w/bind.yaml diff --git a/addons/flink/1.17/plans/standard-4c8g/create-instance-schema.json b/addons/flink/1.17/plans/standard-2c4g5w/create-instance-schema.json similarity index 100% rename from addons/flink/1.17/plans/standard-4c8g/create-instance-schema.json rename to addons/flink/1.17/plans/standard-2c4g5w/create-instance-schema.json diff --git a/addons/flink/1.17/plans/standard-2c4g5w/meta.yaml b/addons/flink/1.17/plans/standard-2c4g5w/meta.yaml new file mode 100644 index 00000000..8f86f4b7 --- /dev/null +++ b/addons/flink/1.17/plans/standard-2c4g5w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g5w" +id: 46148cab-d613-4a08-88c9-16dc0422eadb +description: "flink standard-2c4g5w plan which limit resources 2 cores 4G memory and 5 taskmanagers." 
+displayName: "standard-2c4g5w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/flink/1.17/plans/standard-2c4g/values.yaml b/addons/flink/1.17/plans/standard-2c4g5w/values.yaml similarity index 97% rename from addons/flink/1.17/plans/standard-2c4g/values.yaml rename to addons/flink/1.17/plans/standard-2c4g5w/values.yaml index 2d6eaa3e..d52805f3 100644 --- a/addons/flink/1.17/plans/standard-2c4g/values.yaml +++ b/addons/flink/1.17/plans/standard-2c4g5w/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: hb-flink-standard-2c4g +fullnameOverride: hb-flink-standard-2c4g5w ## @section Jobmanager deployment parameters ## @@ -32,7 +32,7 @@ jobmanager: taskmanager: ## @param taskmanager.replicaCount Number of Apache Flink replicas ## - replicaCount: 2 + replicaCount: 5 ## Apache Flink pods' resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## Minimum memory for development is 4GB and 2 CPU cores diff --git a/addons/flink/1.17/plans/standard-4c8g/meta.yaml b/addons/flink/1.17/plans/standard-4c8g/meta.yaml deleted file mode 100644 index cf91a3f8..00000000 --- a/addons/flink/1.17/plans/standard-4c8g/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-4c8g" -id: 6cd5bc99-9af8-4451-a75b-2d10957482ed -description: "flink standard-4c8g plan which limit resources 4 cores 8G memory." 
-displayName: "standard-4c8g" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/flink/1.17/plans/standard-4c8g/bind.yaml b/addons/flink/1.17/plans/standard-4c8g5w/bind.yaml similarity index 100% rename from addons/flink/1.17/plans/standard-4c8g/bind.yaml rename to addons/flink/1.17/plans/standard-4c8g5w/bind.yaml diff --git a/addons/flink/1.17/plans/standard-4c8g5w/create-instance-schema.json b/addons/flink/1.17/plans/standard-4c8g5w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/flink/1.17/plans/standard-4c8g5w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/flink/1.17/plans/standard-4c8g5w/meta.yaml b/addons/flink/1.17/plans/standard-4c8g5w/meta.yaml new file mode 100644 index 00000000..ccb7b28b --- /dev/null +++ b/addons/flink/1.17/plans/standard-4c8g5w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g5w" +id: 6cd5bc99-9af8-4451-a75b-2d10957482ed +description: "flink standard-4c8g5w plan which limit resources 4 cores 8G memory and 5 taskmanagers" +displayName: "standard-4c8g5w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/flink/1.17/plans/standard-4c8g/values.yaml b/addons/flink/1.17/plans/standard-4c8g5w/values.yaml similarity index 97% rename from addons/flink/1.17/plans/standard-4c8g/values.yaml rename to addons/flink/1.17/plans/standard-4c8g5w/values.yaml index 09e2b4be..dfdac959 100644 --- a/addons/flink/1.17/plans/standard-4c8g/values.yaml +++ b/addons/flink/1.17/plans/standard-4c8g5w/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: hb-flink-standard-4c8g +fullnameOverride: 
hb-flink-standard-4c8g5w ## @section Jobmanager deployment parameters ## @@ -32,7 +32,7 @@ jobmanager: taskmanager: ## @param taskmanager.replicaCount Number of Apache Flink replicas ## - replicaCount: 2 + replicaCount: 5 ## Apache Flink pods' resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## Minimum memory for development is 4GB and 2 CPU cores From 399ed9c2bdf3ffeab4f1ea5a9cd49cf629c82208 Mon Sep 17 00:00:00 2001 From: Eamon Date: Wed, 27 Nov 2024 10:38:31 +0800 Subject: [PATCH 22/93] chore(clickhouse): add plans --- .../24/chart/clickhouse/values.yaml | 1 + .../24/plans/standard-16c64g1000/values.yaml | 4 ++-- .../24/plans/standard-32c64g12000/bind.yaml | 24 +++++++++++++++++++ .../create-instance-schema.json | 12 ++++++++++ .../24/plans/standard-32c64g12000/meta.yaml | 6 +++++ .../24/plans/standard-32c64g12000/values.yaml | 23 ++++++++++++++++++ .../24/plans/standard-4c16g100/values.yaml | 4 ++-- .../24/plans/standard-8c32g500/values.yaml | 4 ++-- addons/grafana/10/meta.yaml | 2 +- 9 files changed, 73 insertions(+), 7 deletions(-) create mode 100644 addons/clickhouse/24/plans/standard-32c64g12000/bind.yaml create mode 100644 addons/clickhouse/24/plans/standard-32c64g12000/create-instance-schema.json create mode 100644 addons/clickhouse/24/plans/standard-32c64g12000/meta.yaml create mode 100644 addons/clickhouse/24/plans/standard-32c64g12000/values.yaml diff --git a/addons/clickhouse/24/chart/clickhouse/values.yaml b/addons/clickhouse/24/chart/clickhouse/values.yaml index 76726030..e711b6b1 100644 --- a/addons/clickhouse/24/chart/clickhouse/values.yaml +++ b/addons/clickhouse/24/chart/clickhouse/values.yaml @@ -294,6 +294,7 @@ defaultConfigurationOverrides: | {{- $shards := $.Values.shards | int }} {{- range $shard, $e := until $shards }} + true {{- $replicas := $.Values.replicaCount | int }} {{- range $i, $_e := until $replicas }} diff --git a/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml 
b/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml index 60735b83..6c4b2f47 100644 --- a/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml +++ b/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 16000m memory: 64Gi requests: - cpu: 16000m - memory: 64Gi + cpu: 4000m + memory: 16Gi persistence: size: 1000Gi \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-32c64g12000/bind.yaml b/addons/clickhouse/24/plans/standard-32c64g12000/bind.yaml new file mode 100644 index 00000000..8cddaa51 --- /dev/null +++ b/addons/clickhouse/24/plans/standard-32c64g12000/bind.yaml @@ -0,0 +1,24 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: {{ printf "EXTRANET_HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: {{ printf "HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + value: '{{ .Values.auth.username }}' + - name: TCP_PORT + value: 9000 + - name: HTTP_PORT + value: 8123 \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-32c64g12000/create-instance-schema.json b/addons/clickhouse/24/plans/standard-32c64g12000/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/clickhouse/24/plans/standard-32c64g12000/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-32c64g12000/meta.yaml b/addons/clickhouse/24/plans/standard-32c64g12000/meta.yaml new file mode 100644 index 00000000..384a51c8 --- /dev/null +++ b/addons/clickhouse/24/plans/standard-32c64g12000/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c64g12000" +id: 24a85352-a7e0-11ef-8a06-ef08afb2fad9 +description: "clickhouse standard-32c64g12000 plan: Disk 12000Gi ,vCPUs 32 , RAM 64G " +displayName: "standard-32c64g12000" +bindable: true +maximum_polling_duration: 1800 \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml b/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml new file mode 100644 index 00000000..8be44d06 --- /dev/null +++ b/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-clickhouse-cluster-standard-32c64g12000 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## 
choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 32000m + memory: 64Gi + requests: + cpu: 8000m + memory: 16Gi + +persistence: + size: 12000Gi \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-4c16g100/values.yaml b/addons/clickhouse/24/plans/standard-4c16g100/values.yaml index 8b69f236..002cc894 100644 --- a/addons/clickhouse/24/plans/standard-4c16g100/values.yaml +++ b/addons/clickhouse/24/plans/standard-4c16g100/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4000m memory: 16Gi requests: - cpu: 4000m - memory: 16Gi + cpu: 2000m + memory: 4Gi persistence: size: 100Gi diff --git a/addons/clickhouse/24/plans/standard-8c32g500/values.yaml b/addons/clickhouse/24/plans/standard-8c32g500/values.yaml index 3d1c8a1e..dab1c884 100644 --- a/addons/clickhouse/24/plans/standard-8c32g500/values.yaml +++ b/addons/clickhouse/24/plans/standard-8c32g500/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8000m memory: 32Gi requests: - cpu: 8000m - memory: 32Gi + cpu: 2000m + memory: 8Gi persistence: size: 500Gi \ No newline at end of file diff --git a/addons/grafana/10/meta.yaml b/addons/grafana/10/meta.yaml index 81d5f692..4a066bd4 100644 --- a/addons/grafana/10/meta.yaml +++ b/addons/grafana/10/meta.yaml @@ -13,7 +13,7 @@ tags: grafana bindable: true instances_retrievable: true bindings_retrievable: true -plan_updateable: true +plan_updateable: false allow_parameters: - name: "networkPolicy.allowNamespaces" required: false From e842dd693c20d70ac5d081ace3334efc4d6ecc30 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 28 Nov 2024 13:37:03 +0800 Subject: [PATCH 
23/93] chore(mysql-cluster): remove plans 1c2g10 --- .../8.0/chart/mysql-cluster/values.yaml | 1 - .../8.0/plans/standard-1c2g10/bind.yaml | 35 ----------- .../create-instance-schema.json | 12 ---- .../8.0/plans/standard-1c2g10/meta.yaml | 6 -- .../8.0/plans/standard-1c2g10/values.yaml | 61 ------------------- 5 files changed, 115 deletions(-) delete mode 100644 addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml delete mode 100644 addons/mysql-cluster/8.0/plans/standard-1c2g10/create-instance-schema.json delete mode 100644 addons/mysql-cluster/8.0/plans/standard-1c2g10/meta.yaml delete mode 100644 addons/mysql-cluster/8.0/plans/standard-1c2g10/values.yaml diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml index 34a406b2..d32c9c56 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml @@ -235,7 +235,6 @@ primary: character-set-server=UTF8MB4 collation-server=UTF8MB4_general_ci slow_query_log=0 - slow_query_log_file=/opt/drycc/mysql/logs/mysqld.log long_query_time=10.0 log_timestamps=system disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" diff --git a/addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml b/addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml deleted file mode 100644 index a0668383..00000000 --- a/addons/mysql-cluster/8.0/plans/standard-1c2g10/bind.yaml +++ /dev/null @@ -1,35 +0,0 @@ -credential: -{{- if (eq .Values.router.service.type "LoadBalancer") }} - - name: EXTRANET_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }}-router - jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} - - name: DOMAIN - value: {{ template "common.names.fullname" . }}-router.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - - name: HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . 
}}-router - jsonpath: '{ .spec.clusterIP }' - - name: PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "common.names.fullname" . }}-svcbind-administrator-user - jsonpath: '{ .data.password }' - - name: USERNAME - valueFrom: - secretKeyRef: - name: {{ template "common.names.fullname" . }}-svcbind-administrator-user - jsonpath: '{ .data.username }' - - name: READONLY_PORT - valueFrom: - secretKeyRef: - name: {{ template "common.names.fullname" . }}-svcbind-administrator-user - jsonpath: '{ .data.portro }' - - name: READWRITE_PORT - valueFrom: - secretKeyRef: - name: {{ template "common.names.fullname" . }}-svcbind-administrator-user - jsonpath: '{ .data.portrw }' \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-1c2g10/create-instance-schema.json b/addons/mysql-cluster/8.0/plans/standard-1c2g10/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/mysql-cluster/8.0/plans/standard-1c2g10/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-1c2g10/meta.yaml b/addons/mysql-cluster/8.0/plans/standard-1c2g10/meta.yaml deleted file mode 100644 index b06fc92b..00000000 --- a/addons/mysql-cluster/8.0/plans/standard-1c2g10/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-1c2g10" -id: 2b455154-8725-482a-95b2-a193c180d9b5 -description: "Mysql Cluster standard-1c2g10 plan: Disk 10Gi ,vCPUs 1 , RAM 2G , DB MAX Connection 600" -displayName: "standard-1c2g10" -bindable: true -maximum_polling_duration: 1800 \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-1c2g10/values.yaml 
b/addons/mysql-cluster/8.0/plans/standard-1c2g10/values.yaml deleted file mode 100644 index 437127a4..00000000 --- a/addons/mysql-cluster/8.0/plans/standard-1c2g10/values.yaml +++ /dev/null @@ -1,61 +0,0 @@ -## @param fullnameOverride String to fully override common.names.fullname template -## -fullnameOverride: hb-mysql-cluster-standard-10 - -## MinIO® containers' resource requests and limits -## ref: https://kubernetes.io/docs/user-guide/compute-resources/ -## We usually recommend not to specify default resources and to leave this as a conscious -## choice for the user. This also increases chances charts run on environments with little -## resources, such as Minikube. If you do want to specify resources, uncomment the following -## lines, adjust them as necessary, and remove the curly braces after 'resources:'. -## @param resources.limits The resources limits for the MinIO® container -## @param resources.requests The requested resources for the MinIO® container -## -primary: - maxConnectionLimit: 600 - resources: - limits: - cpu: 1000m - memory: 2Gi - requests: - cpu: 1000m - memory: 2Gi - - -## @section Persistence parameters - -## Enable persistence using Persistent Volume Claims -## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ -## - persistence: - ## @param primary.persistence.enabled Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir - ## - enabled: true - ## @param primary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL primary replicas - ## NOTE: When it's set the rest of persistence parameters are ignored - ## - existingClaim: "" - ## @param primary.persistence.storageClass MySQL primary persistent volume storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "" - ## @param primary.persistence.annotations MySQL primary persistent volume claim annotations - ## - annotations: {} - ## @param primary.persistence.accessModes MySQL primary persistent volume access Modes - ## - accessModes: - - ReadWriteOnce - ## @param primary.persistence.size MySQL primary persistent volume size - ## - size: 10Gi - ## @param primary.persistence.selector Selector to match an existing Persistent Volume - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} From 3a3ae951a1a33c172f58a668a3d220cd3f129221 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 6 Dec 2024 15:41:53 +0800 Subject: [PATCH 24/93] chore(addons): update redis kvrocks meta --- addons/kvrocks/2.8/meta.yaml | 9 +++++++++ addons/redis-cluster/7.0/meta.yaml | 3 +++ addons/redis/7.0/meta.yaml | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/addons/kvrocks/2.8/meta.yaml b/addons/kvrocks/2.8/meta.yaml index 88626c60..711982d4 100644 --- a/addons/kvrocks/2.8/meta.yaml +++ b/addons/kvrocks/2.8/meta.yaml @@ -24,9 +24,18 @@ allow_parameters: - name: "networkPolicy.allowNamespaces" required: false description: "networkPolicy allowNamespaces config for values.yaml" +- name: "master.nodeSelector" + required: false + description: "master nodeSelector config for values.yaml" - name: "master.service.type" required: false description: "master service type config for values.yaml" +- name: "master.nodeSelector" + required: false + description: "master nodeSelector config for values.yaml" +- name: "replica.nodeSelector" + required: false + description: "replica nodeSelector config for values.yaml" - name: "replica.service.type" required: false description: "replica service type config for values.yaml" diff --git a/addons/redis-cluster/7.0/meta.yaml b/addons/redis-cluster/7.0/meta.yaml index 8fd165b0..5936a22d 100644 --- a/addons/redis-cluster/7.0/meta.yaml +++ b/addons/redis-cluster/7.0/meta.yaml @@ 
-21,6 +21,9 @@ allow_parameters: - name: "redis.useAOFPersistence" required: false description: "redis.useAOFPersistence config for values.yaml" +- name: "redis.nodeSelector" + required: false + description: "redis.nodeSelector config for values.yaml" - name: "networkPolicy.allowNamespaces" required: false description: "networkPolicy allowNamespaces config for values.yaml" diff --git a/addons/redis/7.0/meta.yaml b/addons/redis/7.0/meta.yaml index 43f88697..74688fd6 100644 --- a/addons/redis/7.0/meta.yaml +++ b/addons/redis/7.0/meta.yaml @@ -27,9 +27,15 @@ allow_parameters: - name: "master.service.type" required: false description: "master service type config for values.yaml" +- name: "master.nodeSelector" + required: false + description: "master nodeSelector config for values.yaml" - name: "master.disableCommands" required: false description: "master disableCommands config for values.yaml" +- name: "replica.nodeSelector" + required: false + description: "replica nodeSelector config for values.yaml" - name: "replica.service.type" required: false description: "replica service type config for values.yaml" From 86ad29168be6a1a04ca414f9b31fc3b01f7867d0 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 9 Dec 2024 09:36:27 +0800 Subject: [PATCH 25/93] chore(redis-cluster): update proxy config --- .../redis-cluster/templates/redis-statefulset.yaml | 4 +++- .../redis-cluster/7.0/chart/redis-cluster/values.yaml | 9 +++++++-- .../redis-cluster/7.0/plans/standard-1024/values.yaml | 11 ++++++++--- .../redis-cluster/7.0/plans/standard-128/values.yaml | 11 ++++++++--- .../7.0/plans/standard-16384/values.yaml | 11 ++++++++--- .../redis-cluster/7.0/plans/standard-2048/values.yaml | 11 ++++++++--- .../redis-cluster/7.0/plans/standard-256/values.yaml | 11 ++++++++--- .../7.0/plans/standard-32768/values.yaml | 11 ++++++++--- .../redis-cluster/7.0/plans/standard-4096/values.yaml | 11 ++++++++--- .../redis-cluster/7.0/plans/standard-512/values.yaml | 11 ++++++++--- 
.../7.0/plans/standard-65536/values.yaml | 11 ++++++++--- .../redis-cluster/7.0/plans/standard-8192/values.yaml | 11 ++++++++--- 12 files changed, 90 insertions(+), 33 deletions(-) diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml index 59146af0..4f9eb1df 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml @@ -326,7 +326,9 @@ spec: -startup-nodes=127.0.0.1:{{ .Values.redis.containerPorts.redis }} \ -addr=0.0.0.0:{{ .Values.proxy.containerPorts.proxy }} \ -password=$(REDIS_PASSWORD) \ - -max-procs={{ .Values.proxy.maxProcs }} + -max-procs={{ .Values.proxy.maxProcs }} \ + -backend-init-connections={{ .Values.proxy.backendInitConnections }} \ + -backend-idle-connections={{ .Values.proxy.backendIdleConnections }} {{- end }} env: {{- if and .Values.usePassword (not .Values.usePasswordFile) }} diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml index b66ee1e3..0d8e9e11 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml @@ -783,8 +783,13 @@ proxy: ## memory: 256Mi ## requests: {} - # max-procs - maxProcs: 1 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 + ## ## Configure extra options for Redis® liveness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) ## @param redis.livenessProbe.enabled Enable livenessProbe diff --git a/addons/redis-cluster/7.0/plans/standard-1024/values.yaml b/addons/redis-cluster/7.0/plans/standard-1024/values.yaml index d416c111..bdcd4ac1 100644 --- 
a/addons/redis-cluster/7.0/plans/standard-1024/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-1024/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 1 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 1 - memory: 1Gi + cpu: 4 + memory: 8Gi requests: cpu: 500m memory: 512Mi \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-128/values.yaml b/addons/redis-cluster/7.0/plans/standard-128/values.yaml index b1cfab9e..17d084b4 100644 --- a/addons/redis-cluster/7.0/plans/standard-128/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-128/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 1 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 1 - memory: 1Gi + cpu: 4 + memory: 8Gi requests: cpu: 500m memory: 512Mi \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml index c41a02c6..9a948d32 100644 --- a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 2 + ## max-procs + maxProcs: 8 + ## backend-init-connections + 
backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 1000 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 4Gi + cpu: 8 + memory: 16Gi requests: cpu: 1 memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-2048/values.yaml b/addons/redis-cluster/7.0/plans/standard-2048/values.yaml index bc5cfd0b..438d9c6b 100644 --- a/addons/redis-cluster/7.0/plans/standard-2048/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-2048/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 1 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 2Gi + cpu: 4 + memory: 8Gi requests: cpu: 1 memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-256/values.yaml b/addons/redis-cluster/7.0/plans/standard-256/values.yaml index 46472a2b..507d3315 100644 --- a/addons/redis-cluster/7.0/plans/standard-256/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-256/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 1 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 1 - memory: 1Gi + cpu: 4 + memory: 
8Gi requests: cpu: 500m memory: 512Mi \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml index e835e052..e7544356 100644 --- a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 2 + ## max-procs + maxProcs: 8 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 1000 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 4Gi + cpu: 8 + memory: 16Gi requests: cpu: 1 memory: 2Gi \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml index 721d0995..2044b555 100644 --- a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 1 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 2Gi + cpu: 4 + memory: 8Gi requests: cpu: 1 memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-512/values.yaml b/addons/redis-cluster/7.0/plans/standard-512/values.yaml index 203e907d..34d73d04 100644 --- a/addons/redis-cluster/7.0/plans/standard-512/values.yaml +++ 
b/addons/redis-cluster/7.0/plans/standard-512/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 1 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 1 - memory: 1Gi + cpu: 4 + memory: 8Gi requests: cpu: 500m memory: 512Mi \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml index 59907681..2fe8ce9c 100644 --- a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml @@ -41,7 +41,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 2 + ## max-procs + maxProcs: 8 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 1000 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -49,8 +54,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 4Gi + cpu: 8 + memory: 16Gi requests: cpu: 1 memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml index e5d1d86f..dc9fa53b 100644 --- a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml @@ -35,7 +35,12 @@ redis: ## @section Proxy® statefulset parameters ## proxy: - maxProcs: 2 + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 500 ## Proxy® 
resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -43,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 2Gi + cpu: 4 + memory: 8Gi requests: cpu: 1 memory: 1Gi From 473804d10ea48621d9d83c6192702810461e6604 Mon Sep 17 00:00:00 2001 From: duanhongyi Date: Tue, 10 Dec 2024 11:22:23 +0800 Subject: [PATCH 26/93] chore(seaweedfs): optimize cronjob parameters --- addons/seaweedfs/3/chart/seaweedfs/values.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/addons/seaweedfs/3/chart/seaweedfs/values.yaml b/addons/seaweedfs/3/chart/seaweedfs/values.yaml index f5d0bd8b..ef54e961 100644 --- a/addons/seaweedfs/3/chart/seaweedfs/values.yaml +++ b/addons/seaweedfs/3/chart/seaweedfs/values.yaml @@ -391,14 +391,9 @@ volume: cronjob: scripts: | lock - ec.encode -fullPercent=95 -quietFor=1h - ec.rebuild -force - ec.balance -force volume.deleteEmpty -quietFor=24h -force volume.balance -force volume.fix.replication - volume.fsck -reallyDeleteFromVolume - volume.vacuum -garbageThreshold 0 s3.clean.uploads -timeAgo=24h unlock timeZone: "Etc/UTC" From b96779c470069837cdbb92f9a0d1601a3f4c6afe Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 12 Dec 2024 09:53:23 +0800 Subject: [PATCH 27/93] chore(addons): add meta allow_parameters --- addons/airflow/2/meta.yaml | 12 ++++++++++++ addons/flink/1.17/meta.yaml | 6 ++++++ addons/kafka/3.6/meta.yaml | 9 +++++++++ addons/minio/2023/meta.yaml | 3 +++ addons/opensearch/2.10/meta.yaml | 15 +++++++++++++++ addons/rabbitmq/3.12/meta.yaml | 3 +++ addons/spark/3.4/meta.yaml | 6 ++++++ addons/zookeeper/3.9/meta.yaml | 3 +++ 8 files changed, 57 insertions(+) diff --git a/addons/airflow/2/meta.yaml b/addons/airflow/2/meta.yaml index 5fb50aaa..001bb24f 100644 --- a/addons/airflow/2/meta.yaml +++ b/addons/airflow/2/meta.yaml @@ -33,18 +33,27 @@ allow_parameters: - name: "web.image" required: false description: "web image 
config for values.yaml" +- name: "web.nodeSelector" + required: false + description: "web nodeSelector config for values.yaml" - name: "web.extraEnvVars" required: false description: "web extraEnvVars config for values.yaml" - name: "scheduler.image" required: false description: "scheduler image config for values.yaml" +- name: "scheduler.nodeSelector" + required: false + description: "scheduler nodeSelector config for values.yaml" - name: "scheduler.extraEnvVars" required: false description: "scheduler extraEnvVars config for values.yaml" - name: "worker.image" required: false description: "worker image config for values.yaml" +- name: "worker.nodeSelector" + required: false + description: "worker nodeSelector config for values.yaml" - name: "worker.extraEnvVars" required: false description: "worker extraEnvVars config for values.yaml" @@ -57,6 +66,9 @@ allow_parameters: - name: "statsd.enabled" required: false description: "statsd enabled or not config for values.yaml" +- name: "statsd.nodeSelector" + required: false + description: "statsd nodeSelector config for values.yaml" - name: "networkPolicy.allowNamespaces" required: false description: "networkPolicy allowNamespaces config for values.yaml" diff --git a/addons/flink/1.17/meta.yaml b/addons/flink/1.17/meta.yaml index 81d6bec4..b40b6017 100644 --- a/addons/flink/1.17/meta.yaml +++ b/addons/flink/1.17/meta.yaml @@ -15,6 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "jobmanager.nodeSelector" + required: false + description: "jobmanager nodeSelector config for values.yaml" - name: "jobmanager.service.type" required: false description: "jobmanager service type config for values.yaml" @@ -24,6 +27,9 @@ allow_parameters: - name: "jobmanager.extraEnvVars" required: false description: "jobmanager extraEnvVars config for values.yaml" +- name: "taskmanager.nodeSelector" + required: false + description: "taskmanager nodeSelector config for values.yaml" 
- name: "taskmanager.service.type" required: false description: "taskmanager service type config for values.yaml" diff --git a/addons/kafka/3.6/meta.yaml b/addons/kafka/3.6/meta.yaml index 0db78223..a574f8a0 100644 --- a/addons/kafka/3.6/meta.yaml +++ b/addons/kafka/3.6/meta.yaml @@ -24,9 +24,15 @@ allow_parameters: - name: "controller.extraConfig" required: false description: "controller.extraConfig config for values.yaml" +- name: "controller.nodeSelector" + required: false + description: "controller.nodeSelector config for values.yaml" - name: "broker.extraConfig" required: false description: "broker.extraConfig config for values.yaml" +- name: "broker.nodeSelector" + required: false + description: "broker.nodeSelector config for values.yaml" - name: "listeners.client.protocol" required: false description: "listeners client protocol config for values.yaml" @@ -57,6 +63,9 @@ allow_parameters: - name: "metrics.jmx.enabled" required: false description: "metrics jmx enabled or not config for values.yaml" +- name: "metrics.nodeSelector" + required: false + description: "metrics nodeSelector config for values.yaml" - name: "metrics.kafka.enabled" required: false description: "metrics kafka enabled or not config for values.yaml" diff --git a/addons/minio/2023/meta.yaml b/addons/minio/2023/meta.yaml index 8443fff1..440a4f1c 100644 --- a/addons/minio/2023/meta.yaml +++ b/addons/minio/2023/meta.yaml @@ -15,6 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "nodeSelector" + required: false + description: "nodeSelector config for values.yaml" - name: "networkPolicy.allowNamespaces" required: false description: "networkPolicy allowNamespaces config for values.yaml" diff --git a/addons/opensearch/2.10/meta.yaml b/addons/opensearch/2.10/meta.yaml index d26ac552..1189d899 100644 --- a/addons/opensearch/2.10/meta.yaml +++ b/addons/opensearch/2.10/meta.yaml @@ -24,18 +24,33 @@ allow_parameters: - name: "config" 
required: false description: "opensearch configuration for values.yaml" +- name: "master.nodeSelector" + required: false + description: "master nodeSelector config for values.yaml" - name: "master.networkPolicy.allowNamespaces" required: false description: "master networkPolicy allowNamespaces config for values.yaml" +- name: "ingest.nodeSelector" + required: false + description: "ingest nodeSelector config for values.yaml" - name: "ingest.networkPolicy.allowNamespaces" required: false description: "ingest networkPolicy allowNamespaces config for values.yaml" +- name: "data.nodeSelector" + required: false + description: "data nodeSelector config for values.yaml" - name: "data.networkPolicy.allowNamespaces" required: false description: "data networkPolicy allowNamespaces config for values.yaml" +- name: "coordinating.nodeSelector" + required: false + description: "coordinating nodeSelector config for values.yaml" - name: "coordinating.networkPolicy.allowNamespaces" required: false description: "coordinating networkPolicy allowNamespaces config for values.yaml" +- name: "dashboards.nodeSelector" + required: false + description: "dashboards nodeSelector config for values.yaml" - name: "dashboards.networkPolicy.allowNamespaces" required: false description: "dashboards networkPolicy allowNamespaces config for values.yaml" diff --git a/addons/rabbitmq/3.12/meta.yaml b/addons/rabbitmq/3.12/meta.yaml index d65534de..7d2e5f0c 100644 --- a/addons/rabbitmq/3.12/meta.yaml +++ b/addons/rabbitmq/3.12/meta.yaml @@ -15,6 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "nodeSelector" + required: false + description: "nodeSelector config for values.yaml" - name: "initScripts" required: false description: "initScripts config for values.yaml" diff --git a/addons/spark/3.4/meta.yaml b/addons/spark/3.4/meta.yaml index a36a0691..083883cf 100644 --- a/addons/spark/3.4/meta.yaml +++ b/addons/spark/3.4/meta.yaml @@ -30,12 
+30,18 @@ allow_parameters: - name: "master.extraEnvVars" required: false description: "master extraEnvVars config for values.yaml" +- name: "master.nodeSelector" + required: false + description: "master nodeSelector config for values.yaml" - name: "master.persistence" required: false description: "master persistence config for values.yaml" - name: "worker.configOptions" required: false description: "worker configOptions config for values.yaml" +- name: "worker.nodeSelector" + required: false + description: "worker nodeSelector config for values.yaml" - name: "worker.extraEnvVars" required: false description: "worker extraEnvVars config for values.yaml" diff --git a/addons/zookeeper/3.9/meta.yaml b/addons/zookeeper/3.9/meta.yaml index 2be725a0..c8c555cb 100644 --- a/addons/zookeeper/3.9/meta.yaml +++ b/addons/zookeeper/3.9/meta.yaml @@ -15,6 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "nodeSelector" + required: false + description: "nodeSelector config for values.yaml" - name: "networkPolicy.allowNamespaces" required: false description: "networkPolicy allowNamespaces config for values.yaml" From 9e7bc05f893e96e16e4b586401c1f9fd6ffc9de6 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 12 Dec 2024 16:30:27 +0800 Subject: [PATCH 28/93] chore(redis-cluster): update proxy version --- .../7.0/chart/redis-cluster/values.yaml | 4 ++-- .../7.0/plans/standard-1024/values.yaml | 12 ++++++------ .../redis-cluster/7.0/plans/standard-128/values.yaml | 12 ++++++------ .../7.0/plans/standard-16384/values.yaml | 12 ++++++------ .../7.0/plans/standard-2048/values.yaml | 10 +++++----- .../redis-cluster/7.0/plans/standard-256/values.yaml | 12 ++++++------ .../7.0/plans/standard-32768/values.yaml | 12 ++++++------ .../7.0/plans/standard-4096/values.yaml | 7 ++++--- .../redis-cluster/7.0/plans/standard-512/values.yaml | 12 ++++++------ .../7.0/plans/standard-65536/values.yaml | 12 ++++++------ 
.../7.0/plans/standard-8192/values.yaml | 7 ++++--- 11 files changed, 57 insertions(+), 55 deletions(-) diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml index 0d8e9e11..5e910cd6 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml @@ -784,11 +784,11 @@ proxy: ## requests: {} ## max-procs - maxProcs: 4 + maxProcs: 2 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 200 ## ## Configure extra options for Redis® liveness probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) diff --git a/addons/redis-cluster/7.0/plans/standard-1024/values.yaml b/addons/redis-cluster/7.0/plans/standard-1024/values.yaml index bdcd4ac1..487a31fd 100644 --- a/addons/redis-cluster/7.0/plans/standard-1024/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-1024/values.yaml @@ -36,11 +36,11 @@ redis: ## proxy: ## max-procs - maxProcs: 4 + maxProcs: 2 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 200 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi + cpu: 2 + memory: 4Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-128/values.yaml b/addons/redis-cluster/7.0/plans/standard-128/values.yaml index 17d084b4..b4ba110d 100644 --- a/addons/redis-cluster/7.0/plans/standard-128/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-128/values.yaml @@ -36,11 +36,11 @@ redis: ## proxy: ## max-procs - 
maxProcs: 4 + maxProcs: 2 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 200 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi + cpu: 2 + memory: 4Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml index 9a948d32..51f1b42a 100644 --- a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml @@ -36,11 +36,11 @@ redis: ## proxy: ## max-procs - maxProcs: 8 + maxProcs: 4 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 1000 + backendIdleConnections: 300 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 8 - memory: 16Gi + cpu: 4 + memory: 8Gi requests: - cpu: 1 - memory: 2Gi + cpu: 2 + memory: 4Gi diff --git a/addons/redis-cluster/7.0/plans/standard-2048/values.yaml b/addons/redis-cluster/7.0/plans/standard-2048/values.yaml index 438d9c6b..ad1cc06c 100644 --- a/addons/redis-cluster/7.0/plans/standard-2048/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-2048/values.yaml @@ -36,11 +36,11 @@ redis: ## proxy: ## max-procs - maxProcs: 4 + maxProcs: 2 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 200 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param 
proxy.resources.limits The resources limits for the container @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi + cpu: 2 + memory: 4Gi requests: cpu: 1 - memory: 1Gi + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-256/values.yaml b/addons/redis-cluster/7.0/plans/standard-256/values.yaml index 507d3315..1984796e 100644 --- a/addons/redis-cluster/7.0/plans/standard-256/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-256/values.yaml @@ -36,11 +36,11 @@ redis: ## proxy: ## max-procs - maxProcs: 4 + maxProcs: 2 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 200 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi + cpu: 2 + memory: 4Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml index e7544356..db95057a 100644 --- a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml @@ -36,11 +36,11 @@ redis: ## proxy: ## max-procs - maxProcs: 8 + maxProcs: 4 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 1000 + backendIdleConnections: 300 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 8 - memory: 16Gi + cpu: 4 + memory: 8Gi requests: - cpu: 1 - memory: 2Gi \ No newline at end of file + cpu: 2 + memory: 4Gi diff --git a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml 
b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml index 2044b555..a7e8d25c 100644 --- a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml @@ -40,7 +40,7 @@ proxy: ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 300 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -51,5 +51,6 @@ proxy: cpu: 4 memory: 8Gi requests: - cpu: 1 - memory: 1Gi + cpu: 2 + memory: 4Gi + diff --git a/addons/redis-cluster/7.0/plans/standard-512/values.yaml b/addons/redis-cluster/7.0/plans/standard-512/values.yaml index 34d73d04..28d68a4e 100644 --- a/addons/redis-cluster/7.0/plans/standard-512/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-512/values.yaml @@ -36,11 +36,11 @@ redis: ## proxy: ## max-procs - maxProcs: 4 + maxProcs: 2 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 200 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi + cpu: 2 + memory: 4Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml index 2fe8ce9c..8f81cd1b 100644 --- a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml @@ -42,11 +42,11 @@ redis: ## proxy: ## max-procs - maxProcs: 8 + maxProcs: 4 ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - 
backendIdleConnections: 1000 + backendIdleConnections: 300 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -54,8 +54,8 @@ proxy: ## resources: limits: - cpu: 8 - memory: 16Gi + cpu: 4 + memory: 8Gi requests: - cpu: 1 - memory: 2Gi + cpu: 2 + memory: 4Gi diff --git a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml index dc9fa53b..ad08e73f 100644 --- a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml @@ -40,7 +40,7 @@ proxy: ## backend-init-connections backendInitConnections: 10 ## backend-idle-connections - backendIdleConnections: 500 + backendIdleConnections: 300 ## Proxy® resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param proxy.resources.limits The resources limits for the container @@ -51,5 +51,6 @@ proxy: cpu: 4 memory: 8Gi requests: - cpu: 1 - memory: 1Gi + cpu: 2 + memory: 4Gi + From 99c13f530e78044b5d9967c484cc8a14944ba442 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 16 Dec 2024 14:16:15 +0800 Subject: [PATCH 29/93] chore(kafka): updata kafka binding --- .../3.6/chart/kafka/templates/_helpers.tpl | 2 +- .../3.6/plans/standard-16c32g3w/bind.yaml | 68 ++++++++++++++----- .../kafka/3.6/plans/standard-1c2g3w/bind.yaml | 68 ++++++++++++++----- .../3.6/plans/standard-1c2g3w/values.yaml | 2 +- .../3.6/plans/standard-24c64g3w/bind.yaml | 68 ++++++++++++++----- .../kafka/3.6/plans/standard-2c4g3w/bind.yaml | 68 ++++++++++++++----- .../kafka/3.6/plans/standard-4c8g3w/bind.yaml | 68 ++++++++++++++----- .../3.6/plans/standard-8c16g3w/bind.yaml | 68 ++++++++++++++----- 8 files changed, 308 insertions(+), 104 deletions(-) diff --git a/addons/kafka/3.6/chart/kafka/templates/_helpers.tpl b/addons/kafka/3.6/chart/kafka/templates/_helpers.tpl index 
9ca786fa..1b84b39c 100644 --- a/addons/kafka/3.6/chart/kafka/templates/_helpers.tpl +++ b/addons/kafka/3.6/chart/kafka/templates/_helpers.tpl @@ -583,7 +583,7 @@ Returns the internel listeners based on the number of controller-eligible nodes {{- $fullname := include "common.names.fullname" . -}} {{- $releaseNamespace := include "common.names.namespace" . -}} {{- range $i := until (int .Values.controller.replicaCount) -}} - {{- $nodeAddress := printf "%s-controller-%d.%s-controller-headless.%s.svc.%s:%d" $fullname (int $i) $fullname $releaseNamespace $.Values.clusterDomain (int $.Values.listeners.interbroker.containerPort) -}} + {{- $nodeAddress := printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname (int $i) $fullname $releaseNamespace $.Values.clusterDomain -}} {{- $internelListeners = append $internelListeners (printf "%s" $nodeAddress ) -}} {{- end -}} {{- join "," $internelListeners -}} diff --git a/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml b/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml index 91e82ba3..32f8ff85 100644 --- a/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml @@ -1,10 +1,50 @@ credential: - - name: INTERNAL_LISTENERS - value: {{ include "kafka.kraft.internelListeners" . }} - - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + {{- if .Values.externalAccess.enabled }} + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + {{- end }} + + + {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . ) )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $internelListeners }} + {{- end }} + + - name: KAFKA_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} - name: CLIENT_USERS @@ -15,14 +55,9 @@ credential: secretKeyRef: name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} jsonpath: '{ .data.client-passwords }' - - - name: SYSTEM_USER_PASSPORT - valueFrom: - secretKeyRef: - name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} - jsonpath: '{ .data.system-user-password }' {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} - name: INTER_BROKER_PASSWORDS valueFrom: @@ -40,18 +75,17 @@ credential: {{- end }} {{- end }} -{{- if .Values.externalAccess.enabled }} -{{- $fullname := include "common.names.fullname" . }} -{{- $replicaCount := .Values.controller.replicaCount | int }} -{{- range $i := until $replicaCount }} -{{- $targetPod := printf "%s-controller-%d" (printf "%s" $fullname) $i }} - - name: {{ printf "EXTERNAL_%d" $i }} + {{- if .Values.externalAccess.enabled }} + {{- $fullname := include "common.names.fullname" . 
}} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} valueFrom: serviceRef: name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} + {{- end }} - - name: EXTERNAL_PORT + - name: EXTERNAL_KAFKA_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml b/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml index 91e82ba3..32f8ff85 100644 --- a/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml @@ -1,10 +1,50 @@ credential: - - name: INTERNAL_LISTENERS - value: {{ include "kafka.kraft.internelListeners" . }} - - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + {{- if .Values.externalAccess.enabled }} + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + {{- end }} + + + {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $internelListeners }} + {{- end }} + + - name: KAFKA_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} - name: CLIENT_USERS @@ -15,14 +55,9 @@ credential: secretKeyRef: name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} jsonpath: '{ .data.client-passwords }' - - - name: SYSTEM_USER_PASSPORT - valueFrom: - secretKeyRef: - name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} - jsonpath: '{ .data.system-user-password }' {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} - name: INTER_BROKER_PASSWORDS valueFrom: @@ -40,18 +75,17 @@ credential: {{- end }} {{- end }} -{{- if .Values.externalAccess.enabled }} -{{- $fullname := include "common.names.fullname" . }} -{{- $replicaCount := .Values.controller.replicaCount | int }} -{{- range $i := until $replicaCount }} -{{- $targetPod := printf "%s-controller-%d" (printf "%s" $fullname) $i }} - - name: {{ printf "EXTERNAL_%d" $i }} + {{- if .Values.externalAccess.enabled }} + {{- $fullname := include "common.names.fullname" . 
}} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} valueFrom: serviceRef: name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} + {{- end }} - - name: EXTERNAL_PORT + - name: EXTERNAL_KAFKA_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml index 973e9859..c63052bc 100644 --- a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: hb-kafka-standard-1c2g3w +fullnameOverride: "hb-kafka-standard-1c2g3w" ## @section Controller-eligible statefulset parameters ## diff --git a/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml b/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml index 91e82ba3..32f8ff85 100644 --- a/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml @@ -1,10 +1,50 @@ credential: - - name: INTERNAL_LISTENERS - value: {{ include "kafka.kraft.internelListeners" . }} - - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + {{- if .Values.externalAccess.enabled }} + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + {{- end }} + + + {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . ) )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $internelListeners }} + {{- end }} + + - name: KAFKA_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} - name: CLIENT_USERS @@ -15,14 +55,9 @@ credential: secretKeyRef: name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} jsonpath: '{ .data.client-passwords }' - - - name: SYSTEM_USER_PASSPORT - valueFrom: - secretKeyRef: - name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} - jsonpath: '{ .data.system-user-password }' {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} - name: INTER_BROKER_PASSWORDS valueFrom: @@ -40,18 +75,17 @@ credential: {{- end }} {{- end }} -{{- if .Values.externalAccess.enabled }} -{{- $fullname := include "common.names.fullname" . }} -{{- $replicaCount := .Values.controller.replicaCount | int }} -{{- range $i := until $replicaCount }} -{{- $targetPod := printf "%s-controller-%d" (printf "%s" $fullname) $i }} - - name: {{ printf "EXTERNAL_%d" $i }} + {{- if .Values.externalAccess.enabled }} + {{- $fullname := include "common.names.fullname" . 
}} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} valueFrom: serviceRef: name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} + {{- end }} - - name: EXTERNAL_PORT + - name: EXTERNAL_KAFKA_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml b/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml index 91e82ba3..32f8ff85 100644 --- a/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml @@ -1,10 +1,50 @@ credential: - - name: INTERNAL_LISTENERS - value: {{ include "kafka.kraft.internelListeners" . }} - - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + {{- if .Values.externalAccess.enabled }} + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + {{- end }} + + + {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $internelListeners }} + {{- end }} + + - name: KAFKA_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} - name: CLIENT_USERS @@ -15,14 +55,9 @@ credential: secretKeyRef: name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} jsonpath: '{ .data.client-passwords }' - - - name: SYSTEM_USER_PASSPORT - valueFrom: - secretKeyRef: - name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} - jsonpath: '{ .data.system-user-password }' {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} - name: INTER_BROKER_PASSWORDS valueFrom: @@ -40,18 +75,17 @@ credential: {{- end }} {{- end }} -{{- if .Values.externalAccess.enabled }} -{{- $fullname := include "common.names.fullname" . }} -{{- $replicaCount := .Values.controller.replicaCount | int }} -{{- range $i := until $replicaCount }} -{{- $targetPod := printf "%s-controller-%d" (printf "%s" $fullname) $i }} - - name: {{ printf "EXTERNAL_%d" $i }} + {{- if .Values.externalAccess.enabled }} + {{- $fullname := include "common.names.fullname" . 
}} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} valueFrom: serviceRef: name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} + {{- end }} - - name: EXTERNAL_PORT + - name: EXTERNAL_KAFKA_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml b/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml index 91e82ba3..32f8ff85 100644 --- a/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml @@ -1,10 +1,50 @@ credential: - - name: INTERNAL_LISTENERS - value: {{ include "kafka.kraft.internelListeners" . }} - - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + {{- if .Values.externalAccess.enabled }} + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + {{- end }} + + + {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $internelListeners }} + {{- end }} + + - name: KAFKA_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} - name: CLIENT_USERS @@ -15,14 +55,9 @@ credential: secretKeyRef: name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} jsonpath: '{ .data.client-passwords }' - - - name: SYSTEM_USER_PASSPORT - valueFrom: - secretKeyRef: - name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} - jsonpath: '{ .data.system-user-password }' {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} - name: INTER_BROKER_PASSWORDS valueFrom: @@ -40,18 +75,17 @@ credential: {{- end }} {{- end }} -{{- if .Values.externalAccess.enabled }} -{{- $fullname := include "common.names.fullname" . }} -{{- $replicaCount := .Values.controller.replicaCount | int }} -{{- range $i := until $replicaCount }} -{{- $targetPod := printf "%s-controller-%d" (printf "%s" $fullname) $i }} - - name: {{ printf "EXTERNAL_%d" $i }} + {{- if .Values.externalAccess.enabled }} + {{- $fullname := include "common.names.fullname" . 
}} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} valueFrom: serviceRef: name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} + {{- end }} - - name: EXTERNAL_PORT + - name: EXTERNAL_KAFKA_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml b/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml index 91e82ba3..32f8ff85 100644 --- a/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml @@ -1,10 +1,50 @@ credential: - - name: INTERNAL_LISTENERS - value: {{ include "kafka.kraft.internelListeners" . }} - - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + {{- if .Values.externalAccess.enabled }} + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + {{- end }} + + + {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $internelListeners }} + {{- end }} + + - name: KAFKA_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} - name: CLIENT_USERS @@ -15,14 +55,9 @@ credential: secretKeyRef: name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} jsonpath: '{ .data.client-passwords }' - - - name: SYSTEM_USER_PASSPORT - valueFrom: - secretKeyRef: - name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} - jsonpath: '{ .data.system-user-password }' {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} - name: INTER_BROKER_PASSWORDS valueFrom: @@ -40,18 +75,17 @@ credential: {{- end }} {{- end }} -{{- if .Values.externalAccess.enabled }} -{{- $fullname := include "common.names.fullname" . }} -{{- $replicaCount := .Values.controller.replicaCount | int }} -{{- range $i := until $replicaCount }} -{{- $targetPod := printf "%s-controller-%d" (printf "%s" $fullname) $i }} - - name: {{ printf "EXTERNAL_%d" $i }} + {{- if .Values.externalAccess.enabled }} + {{- $fullname := include "common.names.fullname" . 
}} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} valueFrom: serviceRef: name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} + {{- end }} - - name: EXTERNAL_PORT + - name: EXTERNAL_KAFKA_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} {{- end }} From 0513bd361fdaf087bbe2530b3c28d4bec0ee0f60 Mon Sep 17 00:00:00 2001 From: duanhongyi Date: Mon, 23 Dec 2024 09:03:33 +0800 Subject: [PATCH 30/93] fix(wooddpecker): secsets are deprecated --- .woodpecker/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.woodpecker/build.yml b/.woodpecker/build.yml index 138adcb5..36df700b 100644 --- a/.woodpecker/build.yml +++ b/.woodpecker/build.yml @@ -25,8 +25,8 @@ steps: environment: REPO_NAME: ${CI_REPO_NAME} REPO_OWNER: ${CI_REPO_OWNER} - secrets: - - github_token + GITHUB_TOKEN: + from_secret: github_token when: - event: tag ref: refs/tags/v* From 3d3bdcade6d16dd559510f02a59b9aa5e86c947a Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 23 Dec 2024 18:10:49 +0800 Subject: [PATCH 31/93] chore(redis-cluster): update envoy proxy version --- .../redis-cluster/templates/configmap.yaml | 89 +++++++++++++++++++ .../templates/redis-statefulset.yaml | 15 ++-- .../7.0/plans/standard-1024/values.yaml | 6 +- .../7.0/plans/standard-128/values.yaml | 6 +- .../7.0/plans/standard-16384/values.yaml | 6 +- .../7.0/plans/standard-256/values.yaml | 6 +- .../7.0/plans/standard-32768/values.yaml | 6 +- .../7.0/plans/standard-4096/values.yaml | 6 +- .../7.0/plans/standard-512/values.yaml | 6 +- .../7.0/plans/standard-65536/values.yaml | 6 +- .../7.0/plans/standard-8192/values.yaml | 7 +- 11 files changed, 121 insertions(+), 38 deletions(-) diff --git 
a/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml index 75baff9f..85b06212 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml @@ -11,6 +11,95 @@ metadata: annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} data: + {{- if .Values.proxy.enabled }} + redis-proxy-default.yaml: |- + overload_manager: + resource_monitors: + - name: "envoy.resource_monitors.global_downstream_max_connections" + typed_config: + "@type": type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig + max_active_downstream_connections: 10000 + static_resources: + listeners: + - name: redis_listener + address: + socket_address: + address: 0.0.0.0 + port_value: {{ .Values.proxy.containerPorts.proxy }} + filter_chains: + - filters: + - name: envoy.filters.network.redis_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy + stat_prefix: egress_redis + settings: + op_timeout: 5s + prefix_routes: + catch_all_route: + cluster: redis_cluster + downstream_auth_username: + inline_string: "default" + downstream_auth_passwords: + - inline_string: {REDIS_PASSWORD} + {{- if .Values.tls.enabled }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + require_client_certificate: true + common_tls_context: + tls_certificates: + - certificate_chain: + filename: {{ template "redis-cluster.tlsCert" . }} + private_key: + filename: {{ template "redis-cluster.tlsCertKey" . }} + validation_context: + trusted_ca: + filename: {{ template "redis-cluster.tlsCACert" . 
}} + {{- end }} + clusters: + - name: redis_cluster + cluster_type: + name: envoy.clusters.redis + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + cluster_refresh_rate: 30s + cluster_refresh_timeout: 10s + connect_timeout: 4s + dns_lookup_family: V4_ONLY + lb_policy: CLUSTER_PROVIDED + load_assignment: + cluster_name: redis_cluster + endpoints: + lb_endpoints: + endpoint: + address: + socket_address: { address: 127.0.0.1, port_value: {{ .Values.redis.containerPorts.redis | quote }} } + typed_extension_protocol_options: + envoy.filters.network.redis_proxy: + "@type": type.googleapis.com/google.protobuf.Struct + value: + auth_username: + inline_string: "default" + auth_password: + inline_string: {REDIS_PASSWORD} + {{- if .Values.tls.enabled }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + tls_certificates: + certificate_chain: + filename: {{ template "redis-cluster.tlsCert" . }} + private_key: + filename: {{ template "redis-cluster.tlsCertKey" . }} + validation_context: + trusted_ca: + filename: {{ template "redis-cluster.tlsCACert" . }} + {{- end }} + {{- end }} redis-default.conf: |- # Redis configuration file example. 
# diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml index 4f9eb1df..20961c4a 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/templates/redis-statefulset.yaml @@ -318,17 +318,12 @@ spec: break fi done - # Start redis cluster proxy + # Start envoy redis proxy {{- if .Values.usePasswordFile }} export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" {{- end }} - redis-cluster-proxy \ - -startup-nodes=127.0.0.1:{{ .Values.redis.containerPorts.redis }} \ - -addr=0.0.0.0:{{ .Values.proxy.containerPorts.proxy }} \ - -password=$(REDIS_PASSWORD) \ - -max-procs={{ .Values.proxy.maxProcs }} \ - -backend-init-connections={{ .Values.proxy.backendInitConnections }} \ - -backend-idle-connections={{ .Values.proxy.backendIdleConnections }} + sed s/{REDIS_PASSWORD}/${REDIS_PASSWORD}/g /opt/drycc/redis/etc/redis-proxy-default.yaml > /opt/drycc/redis/etc/redis-proxy.yaml + supervisord {{- end }} env: {{- if and .Values.usePassword (not .Values.usePasswordFile) }} @@ -389,8 +384,8 @@ spec: - name: scripts mountPath: /scripts - name: default-config - mountPath: /opt/drycc/redis/etc/redis-proxy-default.toml - subPath: redis-proxy-default.toml + mountPath: /opt/drycc/redis/etc/redis-proxy-default.yaml + subPath: redis-proxy-default.yaml {{- if .Values.usePasswordFile }} - name: redis-password mountPath: /opt/drycc/redis/secrets/ diff --git a/addons/redis-cluster/7.0/plans/standard-1024/values.yaml b/addons/redis-cluster/7.0/plans/standard-1024/values.yaml index 487a31fd..66f31221 100644 --- a/addons/redis-cluster/7.0/plans/standard-1024/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-1024/values.yaml @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 4Gi - requests: cpu: 1 memory: 2Gi + requests: + cpu: 500m + memory: 1Gi diff --git 
a/addons/redis-cluster/7.0/plans/standard-128/values.yaml b/addons/redis-cluster/7.0/plans/standard-128/values.yaml index b4ba110d..fe6836ee 100644 --- a/addons/redis-cluster/7.0/plans/standard-128/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-128/values.yaml @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 4Gi - requests: cpu: 1 memory: 2Gi + requests: + cpu: 500m + memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml index 51f1b42a..02861f15 100644 --- a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi - requests: cpu: 2 memory: 4Gi + requests: + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-256/values.yaml b/addons/redis-cluster/7.0/plans/standard-256/values.yaml index 1984796e..aa99d8c2 100644 --- a/addons/redis-cluster/7.0/plans/standard-256/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-256/values.yaml @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 4Gi - requests: cpu: 1 memory: 2Gi + requests: + cpu: 500m + memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml index db95057a..c48bd12e 100644 --- a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi - requests: cpu: 2 memory: 4Gi + requests: + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml index a7e8d25c..9c5f8c8f 100644 --- a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml @@ -48,9 +48,9 @@ proxy: 
## resources: limits: - cpu: 4 - memory: 8Gi - requests: cpu: 2 memory: 4Gi + requests: + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-512/values.yaml b/addons/redis-cluster/7.0/plans/standard-512/values.yaml index 28d68a4e..73ab678b 100644 --- a/addons/redis-cluster/7.0/plans/standard-512/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-512/values.yaml @@ -48,8 +48,8 @@ proxy: ## resources: limits: - cpu: 2 - memory: 4Gi - requests: cpu: 1 memory: 2Gi + requests: + cpu: 500m + memory: 1Gi \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml index 8f81cd1b..07b17e0a 100644 --- a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml @@ -54,8 +54,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi - requests: cpu: 2 memory: 4Gi + requests: + cpu: 1 + memory: 2Gi diff --git a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml index ad08e73f..9e57254f 100644 --- a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml @@ -48,9 +48,8 @@ proxy: ## resources: limits: - cpu: 4 - memory: 8Gi - requests: cpu: 2 memory: 4Gi - + requests: + cpu: 1 + memory: 2Gi From e035fc6cdb7d8a54c8d346490671c4bac68e1fee Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 24 Dec 2024 15:20:30 +0800 Subject: [PATCH 32/93] chore(minio): minio meta allow extraEnvVars paras --- addons/minio/2023/meta.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/addons/minio/2023/meta.yaml b/addons/minio/2023/meta.yaml index 440a4f1c..0d5cdf46 100644 --- a/addons/minio/2023/meta.yaml +++ b/addons/minio/2023/meta.yaml @@ -15,6 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "extraEnvVars" + 
required: false + description: "extraEnvVars config for values.yaml" - name: "nodeSelector" required: false description: "nodeSelector config for values.yaml" From d3938a351af7d85118bcb1d76e879bf51b700ba2 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 2 Jan 2025 10:11:30 +0800 Subject: [PATCH 33/93] chore(addons): optimize multiple addons (#93) * chore(mysql-cluster): modify networkpolicy * fix(prometheus): fix typo * chore(prometheus): add plans 50 * chore(mysql-cluster): add networkpolicy * chore(prometheus): add networkpolicy * chore(promtheus): service discovery in namespace * chore(mysql-cluster): plans 10 * chore(mysql-cluster): add max_connection_limit * chore(mysql-cluster): add plans * chore(prometheus): support discovery addons in namespaces * fix(prometheus): is enabled scrape addons metrics * feat(drycc-addons): add postgresql-cluster * chore(postgresql-cluster): move patroni env to helper * chore(postgresql-cluster):adjust patroni config use configmap * chore(postgresql-cluster):organize code structure * chore(postgresql-cluster): add metrics * chore(postgresql-cluster): add metrics * chore(postgresql-cluster): rename postgresql to postgresql-cluster * chore(postgresql-cluster) add wal-g for backup * chore(postgresql-cluster): redirect postgresql log * chore(postgresql-cluster): add pgbackup * chore(postgresql-cluster): add plans * chore(addons): add cloudbeaver * chore(addons): fix plan binds * chore(prometheus): add auth * fix(postgresql-cluster): service type nil * fix(cloudbeaver): labels application * fix(postgresql-cluster): plans describe typo * chore(postgresql-cluster): allow dyrcc params * chore(postgresql-cluster): generate password random * chore(postgresql-cluster): Adjust backup strategy * chore(postgresql-cluster): bind info * chore(postgresql-cluster): fix typo * chore(postgresql-cluster): megre * chore(postgresql-cluster): megre * chore(cloudbeaver): modify storage method * chore(postgresql-cluster): adjust * 
chore(postgresql-cluster): fix password & netpolicy * chore(addons): add persistentVolumeClaimRetentionPolicy * chore(addons): update support * megre(addons): megre from upstream * chore(postgresql-cluster): reset wal retain size * chore(postgresql-cluster):add hugepages-2Mi limit in plans * chore(addons): alter cloudbeaver plan to 10, prometheus add hotupdate param * chore(mysql-cluster): adjust bind params * chore(postgresql-cluster): add plan 4t * chore(postgresql-cluster): adjust pg params & monitor user privilege * chore(mysql-cluster): set persistentVolumeClaimRetentionPolicy deleted * chore(mysql-cluster): persistence group_replication_group_name after greate new cluster * chore(mysql-cluster): delete charts common * chore(postgresql-cluster): reset max_slot_wal_keep_size * chore(mysql): Re-implementing the cluster implementation * chore(postgresql-cluster): fix 4t plan * chore(addons): add pmm * chore(pmm): set instance name * chore(prometheus): add scrape namespace * chore(postgres): set service type to ClusterIP * chore(postgres): update metrics default values * chore(mysql-cluster): add router configmap , add resources limits to router and metrics * chore(mysql-cluster): modify networkpolicy rules. 
empty ingress when router service type is loadbalancer * chore(postgresql-cluster): Allow all ip when service type is Loadbalancer * chore(cloudbeaver): add networkpolicy ,Allow all ip when service type is Loadbalancer * chore(cloudbeaver): fix typo * chore(pmm): pmm network support * chore(pmm): fix pmm chart.yaml * chore(pmm): fix pmm chart.yaml * chore(addons): against plans to standard specifications * chore(mysql-cluster): fix standard-1c2g10 * chore(addons) add mongodb * chore(mongodb) add networkpolicy allownamespaces * chore(addons):changes cluster role to role and clusterrolebinding to rolebinding * megre(addons): megre upstream * chore(mysql-cluster) fix 1c3g10 bind typo * chore(addons):delete mongodb * chore(addons): add mongodb * chore(grafana): add plans * chore(prometheus): add namespace containers base metrics * chore(grafana): add plans * chore(pmm): add plans * chore(prometheus): add plans * chore(mongodb): enable metrics * chore(mongodb): enable metrics * fix(mongodb): networkpolicy for exporter * megre(prometheus): megre upstream * megre(mongodb): megre upstream * chore(mongodb): add plans * fix(mongodb): plan meta uuid and metrics probe * chore(mongodb): add user for exporter * chore(mysql): update images registry address * chore(prometheus): support kubernetes services probe * chore(postgresql-cluster): use shared memory * fix(mysql-cluster): fix plans typo * fix(prometheus): add alertmanager config * chore(mysql-cluster): add backup * chore(prometheus): add custom scrape * chore(addons): add clickhouse * chore(addons): modify cronjob images pull policy * chore(clickhouse): fix chart version * chore(clickhouse): fix chart version * chore(mysql-cluster): change dump default theads to 1 for avoid use too many mem * chore(clickhouse): add plans * chore(clickhouse): update use keeper * chore(clickhouse): change to use shard0 keeper cluster * chore(fluentbit): add java_multiline * chore(fluentbit): optimize java_multiline * chore(fluentbit): add 
plans * chore(fluentbit):add plans * megre(addons) * chore(mysql-cluster): add router resources limits * chore(mysql-cluster): add metrics collects * chore(prometheus): scrape mysql metrics with params * chore(prometheus): modify mysql scrape params * chore(fluentbit): add gateway multiline * chore(clickhouse): add timezone * chore(fluentbit): add multiline config * chore(postgresql-cluster): add postgresql-logicalbackup * fix(prometheus): scrape mysql * chore(addons): modify plans requests * fix(mysql-cluster): router limits * fix(mysql-cluster): router limits * fix(mysql-cluster): remove metrics probe * chore(mysql-cluster): optimisation metrics * chore(prometheus): drop mysql metrics * fix(prometheus): Remove redundant code * fix(prometheus): configmap-reload images address * chore(fluentbit): timezone +8 * chore(mysql-cluster): bind add domain * chore(postgresql-cluster): bind add domain * chore(addons: add ydb) * chore(clickhouse): add plan 32c64g12000 * chore(grafana): disable update * chore(mysql-cluster):remove plan1c2g10 * chore(grafana): add node selector * chore(prometheus): add node selector * chore(clickhouse) set internal_replication true and other params about memory * chore(postgresql-cluster): remove 1c2g plan * chore(mysql-cluster): Support different MySQL initialization parameters against plan --------- Co-authored-by: lijianguo --- .../24/chart/clickhouse/values.yaml | 9 ++- .../24/plans/standard-16c64g1000/values.yaml | 5 +- .../24/plans/standard-2c4g20/meta.yaml | 2 +- .../24/plans/standard-2c4g20/values.yaml | 3 + .../24/plans/standard-32c64g12000/values.yaml | 5 +- .../24/plans/standard-4c16g100/values.yaml | 3 + .../24/plans/standard-8c32g500/values.yaml | 5 +- addons/grafana/10/meta.yaml | 2 + .../templates/primary/configmap.yaml | 1 + .../templates/primary/statefulset.yaml | 4 - .../8.0/chart/mysql-cluster/values.yaml | 16 +++- addons/mysql-cluster/8.0/meta.yaml | 6 ++ .../8.0/plans/standard-16c64g400/values.yaml | 20 ++++- 
.../8.0/plans/standard-2c4g20/values.yaml | 29 ++++++- .../8.0/plans/standard-2c8g50/values.yaml | 28 ++++++- .../8.0/plans/standard-32c128g800/values.yaml | 20 ++++- .../8.0/plans/standard-4c16g100/values.yaml | 23 +++++- .../8.0/plans/standard-8c32g200/values.yaml | 20 ++++- .../15/chart/postgresql-cluster/values.yaml | 2 +- .../15/plans/standard-1c2g10/bind.yaml | 42 ---------- .../create-instance-schema.json | 12 --- .../15/plans/standard-1c2g10/meta.yaml | 6 -- .../15/plans/standard-1c2g10/values.yaml | 78 ------------------- addons/prometheus/2/meta.yaml | 6 ++ 24 files changed, 188 insertions(+), 159 deletions(-) delete mode 100644 addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml delete mode 100644 addons/postgresql-cluster/15/plans/standard-1c2g10/create-instance-schema.json delete mode 100644 addons/postgresql-cluster/15/plans/standard-1c2g10/meta.yaml delete mode 100644 addons/postgresql-cluster/15/plans/standard-1c2g10/values.yaml diff --git a/addons/clickhouse/24/chart/clickhouse/values.yaml b/addons/clickhouse/24/chart/clickhouse/values.yaml index e711b6b1..720fa9b7 100644 --- a/addons/clickhouse/24/chart/clickhouse/values.yaml +++ b/addons/clickhouse/24/chart/clickhouse/values.yaml @@ -278,6 +278,8 @@ keeper: defaultConfigurationOverrides: | Asia/Shanghai + {{ printf "%.0f" .Values.MaxServerMemoryUsage }} + {{ printf "%.0f" .Values.MergesMutationsMemoryUsageSoftLimit }} @@ -294,8 +296,8 @@ defaultConfigurationOverrides: | {{- $shards := $.Values.shards | int }} {{- range $shard, $e := until $shards }} - true + true {{- $replicas := $.Values.replicaCount | int }} {{- range $i, $_e := until $replicas }} @@ -321,10 +323,13 @@ defaultConfigurationOverrides: | /drycc/clickhouse/keeper/coordination/log /drycc/clickhouse/keeper/coordination/snapshots - + {{ printf "%.0f" .Values.MaxServerMemoryUsage }} + 10000 30000 + {{ printf "%.0f" .Values.MergesMutationsMemoryUsageSoftLimit }} + true trace diff --git 
a/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml b/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml index 6c4b2f47..96c4ccbd 100644 --- a/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml +++ b/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml @@ -20,4 +20,7 @@ resources: memory: 16Gi persistence: - size: 1000Gi \ No newline at end of file + size: 1000Gi + +MaxServerMemoryUsage: 59055800320 +MergesMutationsMemoryUsageSoftLimit: 34359738368 \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-2c4g20/meta.yaml b/addons/clickhouse/24/plans/standard-2c4g20/meta.yaml index ca409ee7..6ae80b84 100644 --- a/addons/clickhouse/24/plans/standard-2c4g20/meta.yaml +++ b/addons/clickhouse/24/plans/standard-2c4g20/meta.yaml @@ -1,6 +1,6 @@ name: "standard-2c4g20" id: 9866afb5-eeb1-4c89-a6d6-01197ff34bbc -description: "clickhouse standard-2c4g20 plan: Disk 20Gi ,vCPUs 2 , RAM 4G " +description: "clickhouse standard-2c4g20 plan: Disk 20Gi ,vCPUs 2 , RAM 4G , DO NOT USE THIS PLAN, only for test " displayName: "standard-2c4g20" bindable: true maximum_polling_duration: 1800 \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-2c4g20/values.yaml b/addons/clickhouse/24/plans/standard-2c4g20/values.yaml index 254b4fea..0c5d38c4 100644 --- a/addons/clickhouse/24/plans/standard-2c4g20/values.yaml +++ b/addons/clickhouse/24/plans/standard-2c4g20/values.yaml @@ -21,3 +21,6 @@ resources: persistence: size: 20Gi + +MaxServerMemoryUsage: 3221225472 +MergesMutationsMemoryUsageSoftLimit: 2147483648 \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml b/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml index 8be44d06..a933b9da 100644 --- a/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml +++ b/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml @@ -20,4 +20,7 @@ resources: memory: 16Gi persistence: - size: 12000Gi \ No newline at end 
of file + size: 12000Gi + +MaxServerMemoryUsage: 59055800320 +MergesMutationsMemoryUsageSoftLimit: 34359738368 \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-4c16g100/values.yaml b/addons/clickhouse/24/plans/standard-4c16g100/values.yaml index 002cc894..39aa6b39 100644 --- a/addons/clickhouse/24/plans/standard-4c16g100/values.yaml +++ b/addons/clickhouse/24/plans/standard-4c16g100/values.yaml @@ -21,3 +21,6 @@ resources: persistence: size: 100Gi + +MaxServerMemoryUsage: 10737418240 +MergesMutationsMemoryUsageSoftLimit: 8589934592 \ No newline at end of file diff --git a/addons/clickhouse/24/plans/standard-8c32g500/values.yaml b/addons/clickhouse/24/plans/standard-8c32g500/values.yaml index dab1c884..117e2e58 100644 --- a/addons/clickhouse/24/plans/standard-8c32g500/values.yaml +++ b/addons/clickhouse/24/plans/standard-8c32g500/values.yaml @@ -20,4 +20,7 @@ resources: memory: 8Gi persistence: - size: 500Gi \ No newline at end of file + size: 500Gi + +MaxServerMemoryUsage: 10737418240 +MergesMutationsMemoryUsageSoftLimit: 8589934592 \ No newline at end of file diff --git a/addons/grafana/10/meta.yaml b/addons/grafana/10/meta.yaml index 4a066bd4..8e4a9eac 100644 --- a/addons/grafana/10/meta.yaml +++ b/addons/grafana/10/meta.yaml @@ -21,4 +21,6 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "grafana.nodeSelector" + required: false archive: false diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/configmap.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/configmap.yaml index e3bf8c93..2f497755 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/configmap.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/configmap.yaml @@ -20,4 +20,5 @@ metadata: data: my.cnf: |- {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }} + 
{{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraConfig "context" $ ) | nindent 4 }} {{- end -}} diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/statefulset.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/statefulset.yaml index 822a407b..2a444c87 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/statefulset.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/primary/statefulset.yaml @@ -160,10 +160,6 @@ spec: - name: MYSQL_DATABASE value: {{ .Values.auth.database | quote }} {{- end }} - {{- if .Values.primary.maxConnectionLimit }} - - name: MAX_CONNECTION_LIMIT - value: {{ .Values.primary.maxConnectionLimit | quote }} - {{- end }} {{- if or (eq .Values.architecture "replication") (eq .Values.architecture "mgr") }} - name: MYSQL_REPLICATION_MODE value: "master" diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml index d32c9c56..1639f6bd 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml @@ -180,7 +180,6 @@ initdbScripts: echo report_host=$mgr_host >> $base_conf_file echo loose-group_replication_group_seeds="$svc_mgr_host-0:24901,$svc_mgr_host-1:24901,$svc_mgr_host-2:24901" >> $base_conf_file echo loose-group_replication_start_on_boot='OFF' >> $base_conf_file - echo max_connections=$MAX_CONNECTION_LIMIT >> $base_conf_file ## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) ## @@ -212,7 +211,14 @@ primary: ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file ## maxConnectionLimit: 2000 - + ## against plans + extraConfig: | + [mysqld] + innodb_io_capacity=2000 + innodb_io_capacity_max=3000 + max_connect_errors=1000000 + thread_cache_size=200 + configuration: |- [mysqld] # server @@ -241,6 +247,8 @@ primary: 
sql_require_primary_key=ON log_error_suppression_list='MY-013360' binlog_transaction_dependency_tracking=WRITESET + default-time-zone='+8:00' + local_infile=ON # Replication log_bin=mysql-bin @@ -606,8 +614,10 @@ router: replicaCount: 2 configuration: |- + [DEFAULT] + max_total_connections = 1000 [routing:bootstrap_rw] - max_connections=1000 + max_connections= 1000 ## @param primary.existingConfigmap Name of existing ConfigMap with MySQL Primary configuration. ## NOTE: When it's set the 'configuration' parameter is ignored diff --git a/addons/mysql-cluster/8.0/meta.yaml b/addons/mysql-cluster/8.0/meta.yaml index df1189e4..9e3db304 100644 --- a/addons/mysql-cluster/8.0/meta.yaml +++ b/addons/mysql-cluster/8.0/meta.yaml @@ -27,4 +27,10 @@ allow_parameters: - name: "backup" required: false description: "backup config for values.yaml" +- name: "primary.nodeSelector" + required: false + description: "primary.nodeSelector for values.yaml" +- name: "router.nodeSelector" + required: false + description: "router.nodeSelector for values.yaml" archive: false \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml index f346d776..e897821a 100644 --- a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml @@ -12,7 +12,19 @@ fullnameOverride: hb-mysql-cluster-standard-400 ## @param resources.requests The requested resources for the MinIO® container ## primary: - maxConnectionLimit: 1600 + extraConfig: | + [mysqld] + innodb_io_capacity=2000 + innodb_io_capacity_max=3000 + max_connect_errors=1000000 + open_files_limit=2000000 + performance_schema_max_table_instances=200 + thread_cache_size=200 + innodb_read_io_threads=32 + innodb_write_io_threads=32 + innodb_buffer_pool_instances=16 + innodb_buffer_pool_size=42949672960 + max_connections=16000 resources: limits: cpu: 16000m @@ -60,6 +72,12 @@ 
primary: selector: {} router: + replicaCount: 4 + configuration: |- + [DEFAULT] + max_total_connections = 3900 + [routing:bootstrap_rw] + max_connections= 3900 resources: limits: cpu: 1600m diff --git a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml index d9422e70..40a62583 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml @@ -12,7 +12,19 @@ fullnameOverride: hb-mysql-cluster-standard-20 ## @param resources.requests The requested resources for the MinIO® container ## primary: - maxConnectionLimit: 1000 + extraConfig: | + [mysqld] + innodb_io_capacity=2000 + innodb_io_capacity_max=3000 + max_connect_errors=1000000 + open_files_limit=2000000 + performance_schema_max_table_instances=200 + thread_cache_size=200 + innodb_read_io_threads=4 + innodb_write_io_threads=4 + innodb_buffer_pool_instances=2 + innodb_buffer_pool_size=2147483648 + max_connections=1000 resources: limits: cpu: 2000m @@ -59,3 +71,18 @@ primary: ## app: my-app ## selector: {} + +router: + replicaCount: 2 + configuration: |- + [DEFAULT] + max_total_connections = 400 + [routing:bootstrap_rw] + max_connections= 400 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml index bc9fd336..5cd7245b 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml @@ -12,7 +12,19 @@ fullnameOverride: hb-mysql-cluster-standard-50 ## @param resources.requests The requested resources for the MinIO® container ## primary: - maxConnectionLimit: 2000 + extraConfig: | + [mysqld] + innodb_io_capacity=2000 + innodb_io_capacity_max=3000 + max_connect_errors=1000000 + open_files_limit=2000000 + 
performance_schema_max_table_instances=200 + thread_cache_size=200 + innodb_read_io_threads=4 + innodb_write_io_threads=4 + innodb_buffer_pool_instances=2 + innodb_buffer_pool_size=4294967296 + max_connections=2000 resources: limits: cpu: 2000m @@ -58,3 +70,17 @@ primary: ## app: my-app ## selector: {} +router: + replicaCount: 2 + configuration: |- + [DEFAULT] + max_total_connections = 900 + [routing:bootstrap_rw] + max_connections= 900 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml index c25affff..92916170 100644 --- a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml @@ -12,7 +12,19 @@ fullnameOverride: hb-mysql-cluster-standard-800 ## @param resources.requests The requested resources for the MinIO® container ## primary: - maxConnectionLimit: 32000 + extraConfig: | + [mysqld] + innodb_io_capacity=2000 + innodb_io_capacity_max=3000 + max_connect_errors=1000000 + open_files_limit=2000000 + performance_schema_max_table_instances=200 + thread_cache_size=200 + innodb_read_io_threads=64 + innodb_write_io_threads=64 + innodb_buffer_pool_instances=32 + innodb_buffer_pool_size=85899345920 + max_connections=32000 resources: limits: cpu: 32000m @@ -59,6 +71,12 @@ primary: ## selector: {} router: + replicaCount: 4 + configuration: |- + [DEFAULT] + max_total_connections = 7900 + [routing:bootstrap_rw] + max_connections= 7900 resources: limits: cpu: 3200m diff --git a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml index 68412b31..9fdcd02b 100644 --- a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml @@ -12,14 +12,27 @@ 
fullnameOverride: hb-mysql-cluster-standard-100 ## @param resources.requests The requested resources for the MinIO® container ## primary: - maxConnectionLimit: 4000 + extraConfig: | + [mysqld] + innodb_io_capacity=2000 + innodb_io_capacity_max=3000 + max_connect_errors=1000000 + open_files_limit=2000000 + performance_schema_max_table_instances=200 + thread_cache_size=200 + innodb_read_io_threads=8 + innodb_write_io_threads=8 + innodb_buffer_pool_instances=4 + innodb_buffer_pool_size=10737418240 + max_connections=4000 + resources: limits: cpu: 4000m memory: 16Gi requests: cpu: 4000m - memory: 16Gi + memory: 12Gi ## @section Persistence parameters @@ -60,6 +73,12 @@ primary: selector: {} router: + replicaCount: 3 + configuration: |- + [DEFAULT] + max_total_connections = 1200 + [routing:bootstrap_rw] + max_connections= 1200 resources: limits: cpu: 500m diff --git a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml index 4061331e..5bd7daef 100644 --- a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml @@ -12,7 +12,19 @@ fullnameOverride: hb-mysql-cluster-standard-200 ## @param resources.requests The requested resources for the MinIO® container ## primary: - maxConnectionLimit: 8000 + extraConfig: | + [mysqld] + innodb_io_capacity=2000 + innodb_io_capacity_max=3000 + max_connect_errors=1000000 + open_files_limit=2000000 + performance_schema_max_table_instances=200 + thread_cache_size=200 + innodb_read_io_threads=16 + innodb_write_io_threads=16 + innodb_buffer_pool_instances=8 + innodb_buffer_pool_size=22548578304 + max_connections=8000 resources: limits: cpu: 8000m @@ -60,6 +72,12 @@ primary: selector: {} router: + replicaCount: 4 + configuration: |- + [DEFAULT] + max_total_connections = 1900 + [routing:bootstrap_rw] + max_connections= 1900 resources: limits: cpu: 800m diff --git 
a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml index efa6be7a..7e3ca476 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml @@ -356,7 +356,7 @@ persistentVolume: ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## - # storageClass: "-" + storageClass: "" subPath: "" mountPath: "/home/postgres/pgdata" annotations: {} diff --git a/addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml b/addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml deleted file mode 100644 index 43111931..00000000 --- a/addons/postgresql-cluster/15/plans/standard-1c2g10/bind.yaml +++ /dev/null @@ -1,42 +0,0 @@ -credential: -{{- if (eq .Values.service.type "LoadBalancer") }} - - name: EXTRANET_MASTER_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }}-master - jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - - name: EXTRANET_REPL_HOST - valueFrom: - serviceRef: - name: {{ template "common.names.fullname" . }}-repl - jsonpath: '{ .status.loadBalancer.ingress[*].ip }' -{{- end }} - - name: DOMAIN_MASTER - value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - - name: DOMAIN_REPL - value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} - - name: MASTER_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }}-master - jsonpath: '{ .spec.clusterIP }' - - name: REPL_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }}-repl - jsonpath: '{ .spec.clusterIP }' - - name: PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "common.names.fullname" . 
}} - jsonpath: '{ .data.admin-password }' - - name: USERNAME - valueFrom: - secretKeyRef: - name: {{ template "common.names.fullname" . }} - jsonpath: '{ .data.admin-user }' - - name: PORT - value: 5432 - - name: DADABASE - value: postgres - diff --git a/addons/postgresql-cluster/15/plans/standard-1c2g10/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-1c2g10/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/postgresql-cluster/15/plans/standard-1c2g10/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-1c2g10/meta.yaml b/addons/postgresql-cluster/15/plans/standard-1c2g10/meta.yaml deleted file mode 100644 index ecec7095..00000000 --- a/addons/postgresql-cluster/15/plans/standard-1c2g10/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-1c2g10" -id: 83c3b52e-2685-4362-9ea1-42e170060c78 -description: "Postgresql Cluster standard-10 plan: Disk 10Gi ,vCPUs 1 , RAM 2G , DB MAX Connection 600" -displayName: "standard-1c2g10" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/15/plans/standard-1c2g10/values.yaml b/addons/postgresql-cluster/15/plans/standard-1c2g10/values.yaml deleted file mode 100644 index 80634ab3..00000000 --- a/addons/postgresql-cluster/15/plans/standard-1c2g10/values.yaml +++ /dev/null @@ -1,78 +0,0 @@ -## @param fullnameOverride String to fully override common.names.fullname template -## -fullnameOverride: hb-postgresql-cluster-standard-10 - -postgresql: - config: |- - # Connectivity - max_connections = 980 - superuser_reserved_connections = 3 - - # Memory Settings - shared_buffers = '256 MB' - 
work_mem = '32 MB' - maintenance_work_mem = '320 MB' - huge_pages = off - effective_cache_size = '1 GB' - effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function - random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) - - # Monitoring - track_io_timing=on # measure exact block IO times - track_functions=pl # track execution times of pl-language procedures if any - - # Replication - max_wal_senders = 10 - synchronous_commit = on - - # Checkpointing: - checkpoint_timeout = '15 min' - checkpoint_completion_target = 0.9 - max_wal_size = '1 GB' - min_wal_size = '512 MB' - - # WAL writing - wal_compression = on - wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) - wal_writer_delay = 200ms - wal_writer_flush_after = 1MB - wal_keep_size = '1 GB' - - # Background writer - bgwriter_delay = 200ms - bgwriter_lru_maxpages = 100 - bgwriter_lru_multiplier = 2.0 - bgwriter_flush_after = 0 - - # Parallel queries: - max_worker_processes = 2 - max_parallel_workers_per_gather = 1 - max_parallel_maintenance_workers = 1 - max_parallel_workers = 2 - parallel_leader_participation = on - - # Advanced features - enable_partitionwise_join = on - enable_partitionwise_aggregate = on - jit = on - max_slot_wal_keep_size = '1000 MB' - track_wal_io_timing = on - maintenance_io_concurrency = 100 - -resources: - # If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: - cpu: 1000m - memory: 2Gi - hugepages-2Mi: 4Mi - requests: - cpu: 1000m - memory: 2Gi - -persistentVolume: - enabled: true - size: 10Gi - -shmVolume: - sizeLimit: "1Gi" \ No newline at end of file diff --git a/addons/prometheus/2/meta.yaml b/addons/prometheus/2/meta.yaml index 50079cc3..97504a6d 100644 --- a/addons/prometheus/2/meta.yaml +++ b/addons/prometheus/2/meta.yaml @@ -51,4 +51,10 @@ allow_parameters: - name: "server.evaluationInterval" required: false description: "evaluationInterval values.yaml" +- name: "server.nodeSelector" + required: false + description: "server nodeSelector" +- name: "alertmanager.nodeSelector" + required: false + description: "alertmanager nodeSelector" archive: false From 941c3b8b39456990be8c81ebfed5193ec210af1c Mon Sep 17 00:00:00 2001 From: Eamon Date: Wed, 8 Jan 2025 15:10:49 +0800 Subject: [PATCH 34/93] chore(postgresql-cluster): optimization of database parameters --- .../templates/networkpolicy.yaml | 7 ++++++- .../15/chart/postgresql-cluster/values.yaml | 21 ++++++++++++------- .../15/plans/standard-16c64g400/values.yaml | 11 ++++++---- .../15/plans/standard-2c4g20/values.yaml | 17 ++++++++------- .../15/plans/standard-2c8g50/values.yaml | 14 ++++++++----- .../15/plans/standard-32c128g800/values.yaml | 12 +++++++---- .../15/plans/standard-32c64g4000/values.yaml | 12 +++++++---- .../15/plans/standard-4c16g100/values.yaml | 12 +++++++---- .../15/plans/standard-8c32g200/values.yaml | 12 +++++++---- 9 files changed, 76 insertions(+), 42 deletions(-) diff --git a/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/networkpolicy.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/networkpolicy.yaml index cf6bfb01..19ff2288 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/networkpolicy.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/networkpolicy.yaml @@ -23,11 +23,16 @@ spec: - ports: - port: 5432 - port: 9000 + - port: 80 + - port: 
8008 {{- if and .Values.metrics.enabled }} - port: {{ .Values.metrics.containerPort }} - {{ end }} + {{ end }} {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: backup {{- if .Values.networkPolicy.allowCurrentNamespace }} - namespaceSelector: matchLabels: diff --git a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml index 7e3ca476..95d57e54 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml @@ -54,9 +54,6 @@ preInitScript: | restapi: listen: 0.0.0.0:8008 connect_address: 0.0.0.0:8008 - authentication: - username: NzUwNjg3MTEtMDgzOS00YTNkLWEyNjAt - password: YjJjMDNjYjQtMDA0Ny00NTgwLTgwYjMt bootstrap: dcs: ttl: 30 @@ -75,16 +72,19 @@ preInitScript: | - host replication postgres 0.0.0.0/0 scram-sha-256 custom_conf: '/opt/drycc/postgresql/config/custom_conf.conf' parameters: - wal_level: hot_standby + max_connections: {{ .Values.patroni.pgParameters.max_connections }} + max_worker_processes: {{ .Values.patroni.pgParameters.max_worker_processes }} + max_parallel_workers: {{ .Values.patroni.pgParameters.max_parallel_workers }} + wal_level: logical hot_standby: "on" - max_connections: 1005 - max_worker_processes: 8 max_wal_senders: 10 max_replication_slots: 10 hot_standby_feedback: on max_prepared_transactions: 0 max_locks_per_transaction: 64 wal_log_hints: "on" + wal_keep_size: "1 GB" + max_slot_wal_keep_size: {{ .Values.patroni.pgParameters.max_slot_wal_keep_size | quote }} track_commit_timestamp: "off" archive_mode: "on" archive_timeout: 300s @@ -194,14 +194,19 @@ postgresql: log_min_duration_statement = 1000 max_wal_size = 4GB min_wal_size = 4GB - max_connections = 1005 - max_worker_processes = 8 max_wal_senders = 10 max_replication_slots = 10 
max_prepared_transactions = 0 max_locks_per_transaction = 64 patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '2 GB' + + ## @param patroni.podAnnotations Additional pod annotations for Postgresql patroni pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## diff --git a/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml index a14f012e..8fe5e689 100644 --- a/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml @@ -1,11 +1,16 @@ ## @param fullnameOverride String to fully override common.names.fullname template ## fullnameOverride: hb-postgresql-cluster-standard-400 +patroni: + pgParameters: + max_worker_processes: 32 + max_parallel_workers: 16 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' postgresql: config: |- # Connectivity - max_connections = 2000 superuser_reserved_connections = 3 # Memory Settings @@ -28,7 +33,7 @@ postgresql: # Checkpointing: checkpoint_timeout = '30 min' checkpoint_completion_target = 0.9 - max_wal_size = '6 GB' + max_wal_size = '16 GB' min_wal_size = '2 GB' # WAL writing @@ -36,7 +41,6 @@ postgresql: wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) wal_writer_delay = 200ms wal_writer_flush_after = 1MB - wal_keep_size = '10 GB' # Background writer bgwriter_delay = 200ms @@ -45,7 +49,6 @@ postgresql: bgwriter_flush_after = 0 # Parallel queries: - max_worker_processes = 16 max_parallel_workers_per_gather = 8 max_parallel_maintenance_workers = 8 max_parallel_workers = 16 diff --git a/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml index 1f1b9a1b..27ea5c42 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml +++ 
b/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml @@ -2,10 +2,16 @@ ## fullnameOverride: hb-postgresql-cluster-standard-20 +patroni: + pgParameters: + max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 1000 + max_slot_wal_keep_size: '2 GB' + postgresql: config: |- # Connectivity - max_connections = 1000 superuser_reserved_connections = 3 # Memory Settings @@ -29,7 +35,7 @@ postgresql: checkpoint_timeout = '15 min' checkpoint_completion_target = 0.9 max_wal_size = '2 GB' - min_wal_size = '512 MB' + min_wal_size = '1 GB' # WAL writing @@ -37,8 +43,6 @@ postgresql: wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) wal_writer_delay = 200ms wal_writer_flush_after = 1MB - wal_keep_size = '1 GB' - # Background writer bgwriter_delay = 200ms @@ -46,18 +50,15 @@ postgresql: bgwriter_lru_multiplier = 2.0 bgwriter_flush_after = 0 - # Parallel queries: - max_worker_processes = 2 + # Parallel queries: max_parallel_workers_per_gather = 1 max_parallel_maintenance_workers = 1 - max_parallel_workers = 2 parallel_leader_participation = on # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on jit = on - max_slot_wal_keep_size = '1000 MB' track_wal_io_timing = on maintenance_io_concurrency = 100 diff --git a/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml index 293a8a45..1062740f 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml @@ -2,10 +2,16 @@ ## fullnameOverride: hb-postgresql-cluster-standard-50 +patroni: + pgParameters: + max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 2000 + max_slot_wal_keep_size: '5 GB' + postgresql: config: |- # Connectivity - max_connections = 1000 superuser_reserved_connections = 3 # Memory Settings @@ -28,15 +34,14 @@ postgresql: # Checkpointing: 
checkpoint_timeout = '15 min' checkpoint_completion_target = 0.9 - max_wal_size = '2 GB' - min_wal_size = '512 MB' + max_wal_size = '4 GB' + min_wal_size = '1 GB' # WAL writing wal_compression = on wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) wal_writer_delay = 200ms wal_writer_flush_after = 1MB - wal_keep_size = '3 GB' # Background writer @@ -46,7 +51,6 @@ postgresql: bgwriter_flush_after = 0 # Parallel queries: - max_worker_processes = 2 max_parallel_workers_per_gather = 1 max_parallel_maintenance_workers = 1 max_parallel_workers = 2 diff --git a/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml index c0ed7230..a2f1bb41 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml @@ -2,10 +2,16 @@ ## fullnameOverride: hb-postgresql-cluster-standard-800 +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '100 GB' + postgresql: config: |- # Connectivity - max_connections = 2000 superuser_reserved_connections = 3 # Memory Settings @@ -28,7 +34,7 @@ postgresql: # Checkpointing: checkpoint_timeout = '30 min' checkpoint_completion_target = 0.9 - max_wal_size = '8 GB' + max_wal_size = '64 GB' min_wal_size = '4 GB' # WAL writing @@ -36,7 +42,6 @@ postgresql: wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) wal_writer_delay = 200ms wal_writer_flush_after = 1MB - wal_keep_size = '10 GB' # Background writer bgwriter_delay = 200ms @@ -45,7 +50,6 @@ postgresql: bgwriter_flush_after = 0 # Parallel queries: - max_worker_processes = 32 max_parallel_workers_per_gather = 16 max_parallel_maintenance_workers = 16 max_parallel_workers = 32 diff --git a/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml 
b/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml index 2c9a48e3..cdc3c494 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml @@ -2,10 +2,16 @@ ## fullnameOverride: hb-postgresql-cluster-standard-800 +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '200 GB' + postgresql: config: |- # Connectivity - max_connections = 2000 superuser_reserved_connections = 3 # Memory Settings @@ -29,14 +35,13 @@ postgresql: checkpoint_timeout = '30 min' checkpoint_completion_target = 0.9 max_wal_size = '32 GB' - min_wal_size = '16 GB' + min_wal_size = '4 GB' # WAL writing wal_compression = on wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) wal_writer_delay = 200ms wal_writer_flush_after = 1MB - wal_keep_size = '60 GB' # Background writer bgwriter_delay = 200ms @@ -45,7 +50,6 @@ postgresql: bgwriter_flush_after = 0 # Parallel queries: - max_worker_processes = 32 max_parallel_workers_per_gather = 16 max_parallel_maintenance_workers = 16 max_parallel_workers = 32 diff --git a/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml index a60c85ce..3b5d6b45 100644 --- a/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml @@ -2,10 +2,16 @@ ## fullnameOverride: hb-postgresql-cluster-standard-100 +patroni: + pgParameters: + max_worker_processes: 8 + max_parallel_workers: 4 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + postgresql: config: |- # Connectivity - max_connections = 2000 superuser_reserved_connections = 3 # Memory Settings @@ -28,7 +34,7 @@ postgresql: # Checkpointing: checkpoint_timeout = '25 min' checkpoint_completion_target = 0.9 - max_wal_size = '4 GB' + max_wal_size = 
'8 GB' min_wal_size = '2 GB' # WAL writing @@ -36,7 +42,6 @@ postgresql: wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) wal_writer_delay = 200ms wal_writer_flush_after = 1MB - wal_keep_size = '4 GB' # Background writer @@ -46,7 +51,6 @@ postgresql: bgwriter_flush_after = 0 # Parallel queries: - max_worker_processes = 4 max_parallel_workers_per_gather = 2 max_parallel_maintenance_workers = 2 max_parallel_workers = 4 diff --git a/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml index 08d777be..e2484df5 100644 --- a/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml @@ -2,10 +2,16 @@ ## fullnameOverride: hb-postgresql-cluster-standard-200 +patroni: + pgParameters: + max_worker_processes: 16 + max_parallel_workers: 8 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + postgresql: config: |- # Connectivity - max_connections = 2000 superuser_reserved_connections = 3 # Memory Settings @@ -28,7 +34,7 @@ postgresql: # Checkpointing: checkpoint_timeout = '25 min' checkpoint_completion_target = 0.9 - max_wal_size = '5 GB' + max_wal_size = '16 GB' min_wal_size = '3 GB' # WAL writing @@ -36,7 +42,6 @@ postgresql: wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) wal_writer_delay = 200ms wal_writer_flush_after = 1MB - wal_keep_size = '5 GB' # Background writer bgwriter_delay = 200ms @@ -45,7 +50,6 @@ postgresql: bgwriter_flush_after = 0 # Parallel queries: - max_worker_processes = 8 max_parallel_workers_per_gather = 4 max_parallel_maintenance_workers = 4 max_parallel_workers = 8 From 5ec8d1afdfd4c3b0123e973b8065f2f0e5309a18 Mon Sep 17 00:00:00 2001 From: Eamon Date: Fri, 10 Jan 2025 17:47:58 +0800 Subject: [PATCH 35/93] chore(clickhouse): remove useless parameters --- addons/clickhouse/24/chart/clickhouse/values.yaml | 1 - 1 
file changed, 1 deletion(-) diff --git a/addons/clickhouse/24/chart/clickhouse/values.yaml b/addons/clickhouse/24/chart/clickhouse/values.yaml index 720fa9b7..daef8b9c 100644 --- a/addons/clickhouse/24/chart/clickhouse/values.yaml +++ b/addons/clickhouse/24/chart/clickhouse/values.yaml @@ -328,7 +328,6 @@ defaultConfigurationOverrides: | 10000 30000 - {{ printf "%.0f" .Values.MergesMutationsMemoryUsageSoftLimit }} true trace From 3af79b3dd33e68c86112425693e9b341d8e4baed Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 13 Jan 2025 14:22:47 +0800 Subject: [PATCH 36/93] chore(addons): update kafka meta allow_parameters --- addons/kafka/3.6/meta.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/addons/kafka/3.6/meta.yaml b/addons/kafka/3.6/meta.yaml index a574f8a0..14623d52 100644 --- a/addons/kafka/3.6/meta.yaml +++ b/addons/kafka/3.6/meta.yaml @@ -24,9 +24,15 @@ allow_parameters: - name: "controller.extraConfig" required: false description: "controller.extraConfig config for values.yaml" +- name: "controller.replicaCount" + required: false + description: "controller.replicaCount config for values.yaml" - name: "controller.nodeSelector" required: false description: "controller.nodeSelector config for values.yaml" +- name: "broker.replicaCount" + required: false + description: "broker.replicaCount config for values.yaml" - name: "broker.extraConfig" required: false description: "broker.extraConfig config for values.yaml" @@ -60,6 +66,9 @@ allow_parameters: - name: "listeners.advertisedListeners" required: false description: "listeners advertisedListeners or not config for values.yaml" +- name: "kraft.enabled" + required: false + description: "kraft enabled or not config for values.yaml" - name: "metrics.jmx.enabled" required: false description: "metrics jmx enabled or not config for values.yaml" @@ -69,4 +78,10 @@ allow_parameters: - name: "metrics.kafka.enabled" required: false description: "metrics kafka enabled or not config for values.yaml" 
+- name: "externalZookeeper" + required: false + description: "externalZookeeper config for values.yaml" +- name: "zookeeperChrootPath" + required: false + description: "zookeeperChrootPath config for values.yaml" archive: false From fe2aededdbeccf47bac824d7f00c12228baaf603 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 14 Jan 2025 17:44:52 +0800 Subject: [PATCH 37/93] chore(kafka): support zookeeper mode config --- addons/kafka/3.6/meta.yaml | 15 ++++++ .../3.6/plans/standard-16c32g3w/bind.yaml | 53 +++++++++++++++---- .../kafka/3.6/plans/standard-1c2g3w/bind.yaml | 53 +++++++++++++++---- .../3.6/plans/standard-24c64g3w/bind.yaml | 53 +++++++++++++++---- .../kafka/3.6/plans/standard-2c4g3w/bind.yaml | 53 +++++++++++++++---- .../kafka/3.6/plans/standard-4c8g3w/bind.yaml | 53 +++++++++++++++---- .../3.6/plans/standard-8c16g3w/bind.yaml | 53 +++++++++++++++---- 7 files changed, 279 insertions(+), 54 deletions(-) diff --git a/addons/kafka/3.6/meta.yaml b/addons/kafka/3.6/meta.yaml index 14623d52..57d5503d 100644 --- a/addons/kafka/3.6/meta.yaml +++ b/addons/kafka/3.6/meta.yaml @@ -27,18 +27,33 @@ allow_parameters: - name: "controller.replicaCount" required: false description: "controller.replicaCount config for values.yaml" +- name: "controller.controllerOnly" + required: false + description: "controller.controllerOnly config for values.yaml" +- name: "controller.zookeeperMigrationMode" + required: false + description: "controller.zookeeperMigrationMode config for values.yaml" - name: "controller.nodeSelector" required: false description: "controller.nodeSelector config for values.yaml" +- name: "controller.persistence" + required: false + description: "controller.persistence config for values.yaml" - name: "broker.replicaCount" required: false description: "broker.replicaCount config for values.yaml" +- name: "broker.zookeeperMigrationMode" + required: false + description: "broker.zookeeperMigrationMode config for values.yaml" - name: "broker.extraConfig" 
required: false description: "broker.extraConfig config for values.yaml" - name: "broker.nodeSelector" required: false description: "broker.nodeSelector config for values.yaml" +- name: "broker.persistence" + required: false + description: "broker.persistence config for values.yaml" - name: "listeners.client.protocol" required: false description: "listeners client protocol config for values.yaml" diff --git a/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml b/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml index 32f8ff85..b3b38988 100644 --- a/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml @@ -1,3 +1,7 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} credential: - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} @@ -23,9 +27,7 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - {{- if .Values.externalAccess.enabled }} - name: EXTERNAL_CLIENT_PORT valueFrom: serviceRef: @@ -33,17 +35,36 @@ credential: jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' {{- end }} - - {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + {{- $brokerList := list }} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} - name: {{ printf "KAFKA_NODE_%d" $i }} - value: {{ $internelListeners }} + value: {{ $broker }} {{- end }} - - name: KAFKA_PORT + {{- $replicaCount := int .Values.broker.replicaCount }} + {{- if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT valueFrom: serviceRef: - name: {{ template "common.names.fullname" . }} + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} @@ -75,8 +96,21 @@ credential: {{- end }} {{- end }} + {{- if .Values.externalAccess.enabled }} - {{- $fullname := include "common.names.fullname" . 
}} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} {{- $replicaCount := .Values.controller.replicaCount | int }} {{- range $i := until $replicaCount }} - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} @@ -86,6 +120,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} - - name: EXTERNAL_KAFKA_PORT + - name: EXTERNAL_KAFKA_NODE_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml b/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml index 32f8ff85..b3b38988 100644 --- a/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml @@ -1,3 +1,7 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} credential: - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} @@ -23,9 +27,7 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - {{- if .Values.externalAccess.enabled }} - name: EXTERNAL_CLIENT_PORT valueFrom: serviceRef: @@ -33,17 +35,36 @@ credential: jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' {{- end }} - - {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + {{- $brokerList := list }} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} - name: {{ printf "KAFKA_NODE_%d" $i }} - value: {{ $internelListeners }} + value: {{ $broker }} {{- end }} - - name: KAFKA_PORT + {{- $replicaCount := int .Values.broker.replicaCount }} + {{- if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT valueFrom: serviceRef: - name: {{ template "common.names.fullname" . }} + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} @@ -75,8 +96,21 @@ credential: {{- end }} {{- end }} + {{- if .Values.externalAccess.enabled }} - {{- $fullname := include "common.names.fullname" . 
}} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} {{- $replicaCount := .Values.controller.replicaCount | int }} {{- range $i := until $replicaCount }} - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} @@ -86,6 +120,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} - - name: EXTERNAL_KAFKA_PORT + - name: EXTERNAL_KAFKA_NODE_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml b/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml index 32f8ff85..b3b38988 100644 --- a/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml @@ -1,3 +1,7 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} credential: - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} @@ -23,9 +27,7 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - {{- if .Values.externalAccess.enabled }} - name: EXTERNAL_CLIENT_PORT valueFrom: serviceRef: @@ -33,17 +35,36 @@ credential: jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' {{- end }} - - {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + {{- $brokerList := list }} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} - name: {{ printf "KAFKA_NODE_%d" $i }} - value: {{ $internelListeners }} + value: {{ $broker }} {{- end }} - - name: KAFKA_PORT + {{- $replicaCount := int .Values.broker.replicaCount }} + {{- if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT valueFrom: serviceRef: - name: {{ template "common.names.fullname" . }} + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} @@ -75,8 +96,21 @@ credential: {{- end }} {{- end }} + {{- if .Values.externalAccess.enabled }} - {{- $fullname := include "common.names.fullname" . 
}} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} {{- $replicaCount := .Values.controller.replicaCount | int }} {{- range $i := until $replicaCount }} - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} @@ -86,6 +120,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} - - name: EXTERNAL_KAFKA_PORT + - name: EXTERNAL_KAFKA_NODE_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml b/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml index 32f8ff85..b3b38988 100644 --- a/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml @@ -1,3 +1,7 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} credential: - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} @@ -23,9 +27,7 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - {{- if .Values.externalAccess.enabled }} - name: EXTERNAL_CLIENT_PORT valueFrom: serviceRef: @@ -33,17 +35,36 @@ credential: jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' {{- end }} - - {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + {{- $brokerList := list }} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} - name: {{ printf "KAFKA_NODE_%d" $i }} - value: {{ $internelListeners }} + value: {{ $broker }} {{- end }} - - name: KAFKA_PORT + {{- $replicaCount := int .Values.broker.replicaCount }} + {{- if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT valueFrom: serviceRef: - name: {{ template "common.names.fullname" . }} + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} @@ -75,8 +96,21 @@ credential: {{- end }} {{- end }} + {{- if .Values.externalAccess.enabled }} - {{- $fullname := include "common.names.fullname" . 
}} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} {{- $replicaCount := .Values.controller.replicaCount | int }} {{- range $i := until $replicaCount }} - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} @@ -86,6 +120,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} - - name: EXTERNAL_KAFKA_PORT + - name: EXTERNAL_KAFKA_NODE_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml b/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml index 32f8ff85..b3b38988 100644 --- a/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml @@ -1,3 +1,7 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} credential: - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} @@ -23,9 +27,7 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - {{- if .Values.externalAccess.enabled }} - name: EXTERNAL_CLIENT_PORT valueFrom: serviceRef: @@ -33,17 +35,36 @@ credential: jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' {{- end }} - - {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + {{- $brokerList := list }} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} - name: {{ printf "KAFKA_NODE_%d" $i }} - value: {{ $internelListeners }} + value: {{ $broker }} {{- end }} - - name: KAFKA_PORT + {{- $replicaCount := int .Values.broker.replicaCount }} + {{- if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT valueFrom: serviceRef: - name: {{ template "common.names.fullname" . }} + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} @@ -75,8 +96,21 @@ credential: {{- end }} {{- end }} + {{- if .Values.externalAccess.enabled }} - {{- $fullname := include "common.names.fullname" . 
}} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} {{- $replicaCount := .Values.controller.replicaCount | int }} {{- range $i := until $replicaCount }} - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} @@ -86,6 +120,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} - - name: EXTERNAL_KAFKA_PORT + - name: EXTERNAL_KAFKA_NODE_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} {{- end }} diff --git a/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml b/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml index 32f8ff85..b3b38988 100644 --- a/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml @@ -1,3 +1,7 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} credential: - name: PROTOCOL_MAP value: {{ include "kafka.securityProtocolMap" . }} @@ -23,9 +27,7 @@ credential: serviceRef: name: {{ template "common.names.fullname" . }} jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - {{- if .Values.externalAccess.enabled }} - name: EXTERNAL_CLIENT_PORT valueFrom: serviceRef: @@ -33,17 +35,36 @@ credential: jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' {{- end }} - - {{- range $i, $internelListeners := ( splitList "," ( include "kafka.kraft.internelListeners" . 
) )}} + {{- $brokerList := list }} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} - name: {{ printf "KAFKA_NODE_%d" $i }} - value: {{ $internelListeners }} + value: {{ $broker }} {{- end }} - - name: KAFKA_PORT + {{- $replicaCount := int .Values.broker.replicaCount }} + {{- if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT valueFrom: serviceRef: - name: {{ template "common.names.fullname" . }} + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} {{- if (include "kafka.client.saslEnabled" .) }} @@ -75,8 +96,21 @@ credential: {{- end }} {{- end }} + {{- if .Values.externalAccess.enabled }} - {{- $fullname := include "common.names.fullname" . 
}} + {{- if or (not .Values.kraft.enabled) (.Values.controller.controllerOnly) }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} {{- $replicaCount := .Values.controller.replicaCount | int }} {{- range $i := until $replicaCount }} - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} @@ -86,6 +120,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} - - name: EXTERNAL_KAFKA_PORT + - name: EXTERNAL_KAFKA_NODE_PORT value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} {{- end }} From 44f06b0e084da55cd9da744f1af0cda1e6960462 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 23 Jan 2025 13:47:25 +0800 Subject: [PATCH 38/93] chore(kvrocks): add 2.10 version --- addons/index.yaml | 2 + .../2.10/chart/kvrocks-2.10/.helmignore | 21 + .../2.10/chart/kvrocks-2.10/Chart.yaml | 23 + .../kvrocks/2.10/chart/kvrocks-2.10/README.md | 557 ++++++ .../kvrocks-2.10/ci/extra-flags-values.yaml | 12 + .../kvrocks-2.10/ci/sentinel-values.yaml | 6 + .../kvrocks-2.10/ci/standalone-values.yaml | 1 + .../img/redis-cluster-topology.png | Bin 0 -> 11448 bytes .../chart/kvrocks-2.10/img/redis-topology.png | Bin 0 -> 9709 bytes .../chart/kvrocks-2.10/templates/NOTES.txt | 180 ++ .../chart/kvrocks-2.10/templates/_helpers.tpl | 208 ++ .../kvrocks-2.10/templates/configmap.yaml | 27 + .../kvrocks-2.10/templates/extra-list.yaml | 4 + .../kvrocks-2.10/templates/headless-svc.yaml | 30 + .../templates/health-configmap.yaml | 141 ++ .../templates/master/application.yaml | 411 ++++ .../kvrocks-2.10/templates/master/psp.yaml | 46 + 
.../kvrocks-2.10/templates/master/pvc.yaml | 26 + .../templates/master/service.yaml | 49 + .../kvrocks-2.10/templates/metrics-svc.yaml | 38 + .../kvrocks-2.10/templates/networkpolicy.yaml | 80 + .../chart/kvrocks-2.10/templates/pdb.yaml | 23 + .../templates/prometheusrule.yaml | 27 + .../kvrocks-2.10/templates/replicas/hpa.yaml | 35 + .../templates/replicas/service.yaml | 49 + .../templates/replicas/statefulset.yaml | 410 ++++ .../chart/kvrocks-2.10/templates/role.yaml | 28 + .../kvrocks-2.10/templates/rolebinding.yaml | 21 + .../templates/scripts-configmap.yaml | 431 +++++ .../chart/kvrocks-2.10/templates/secret.yaml | 23 + .../kvrocks-2.10/templates/sentinel/hpa.yaml | 35 + .../templates/sentinel/node-services.yaml | 88 + .../templates/sentinel/ports-configmap.yaml | 100 + .../templates/sentinel/service.yaml | 114 ++ .../templates/sentinel/statefulset.yaml | 654 +++++++ .../templates/serviceaccount.yaml | 21 + .../templates/servicemonitor.yaml | 45 + .../chart/kvrocks-2.10/values.schema.json | 156 ++ .../2.10/chart/kvrocks-2.10/values.yaml | 1668 +++++++++++++++++ addons/kvrocks/2.10/meta.yaml | 57 + .../2.10/plans/standard-16c32g1024/bind.yaml | 105 ++ .../create-instance-schema.json | 12 + .../2.10/plans/standard-16c32g1024/meta.yaml | 6 + .../plans/standard-16c32g1024/values.yaml | 138 ++ .../2.10/plans/standard-1c2g64/bind.yaml | 105 ++ .../create-instance-schema.json | 12 + .../2.10/plans/standard-1c2g64/meta.yaml | 6 + .../2.10/plans/standard-1c2g64/values.yaml | 139 ++ .../2.10/plans/standard-2c4g128/bind.yaml | 105 ++ .../create-instance-schema.json | 12 + .../2.10/plans/standard-2c4g128/meta.yaml | 6 + .../2.10/plans/standard-2c4g128/values.yaml | 139 ++ .../2.10/plans/standard-4c8g256/bind.yaml | 105 ++ .../create-instance-schema.json | 12 + .../2.10/plans/standard-4c8g256/meta.yaml | 6 + .../2.10/plans/standard-4c8g256/values.yaml | 138 ++ .../2.10/plans/standard-8c16g512/bind.yaml | 105 ++ .../create-instance-schema.json | 12 + 
.../2.10/plans/standard-8c16g512/meta.yaml | 6 + .../2.10/plans/standard-8c16g512/values.yaml | 138 ++ 60 files changed, 7154 insertions(+) create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/.helmignore create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/Chart.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/README.md create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/ci/extra-flags-values.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/ci/sentinel-values.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/ci/standalone-values.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/img/redis-cluster-topology.png create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/img/redis-topology.png create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/NOTES.txt create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/_helpers.tpl create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/configmap.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/extra-list.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/headless-svc.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/health-configmap.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/application.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/psp.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/pvc.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/service.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/metrics-svc.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/networkpolicy.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/pdb.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/prometheusrule.yaml create mode 100644 
addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/hpa.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/service.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/statefulset.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/role.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/rolebinding.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/scripts-configmap.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/secret.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/hpa.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/node-services.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/ports-configmap.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/service.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/statefulset.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/serviceaccount.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/templates/servicemonitor.yaml create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/values.schema.json create mode 100644 addons/kvrocks/2.10/chart/kvrocks-2.10/values.yaml create mode 100644 addons/kvrocks/2.10/meta.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-16c32g1024/bind.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-16c32g1024/create-instance-schema.json create mode 100644 addons/kvrocks/2.10/plans/standard-16c32g1024/meta.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-1c2g64/bind.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-1c2g64/create-instance-schema.json create mode 100644 addons/kvrocks/2.10/plans/standard-1c2g64/meta.yaml create mode 100644 
addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-2c4g128/bind.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-2c4g128/create-instance-schema.json create mode 100644 addons/kvrocks/2.10/plans/standard-2c4g128/meta.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-4c8g256/bind.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-4c8g256/create-instance-schema.json create mode 100644 addons/kvrocks/2.10/plans/standard-4c8g256/meta.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-8c16g512/bind.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-8c16g512/create-instance-schema.json create mode 100644 addons/kvrocks/2.10/plans/standard-8c16g512/meta.yaml create mode 100644 addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index a3cf0a27..33cbbd9c 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -63,6 +63,8 @@ entries: kvrocks: - version: 2.8 description: "Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol." + - version: "2.10" + description: "Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol." yugabytedb: - version: 2024 description: "YugabyteDB is a high-performance distributed SQL database for powering global, internet-scale applications. " \ No newline at end of file diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/.helmignore b/addons/kvrocks/2.10/chart/kvrocks-2.10/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/Chart.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/Chart.yaml new file mode 100644 index 00000000..fa1afc68 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: "2" +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.3 +description: Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol. Kvrocks intends to decrease the cost of memory and increase the capacity while compared to Redis. The design of replication and storage was inspired by rocksplicator and blackwidow. +engine: gotpl +home: https://github.com/apache/kvrocks +icon: https://camo.githubusercontent.com/afe4ab77c3cf0ef991e1dd7aa9f25b9ff96651a6b1cc0b055eda16567dc9d8be/68747470733a2f2f6b76726f636b732e6170616368652e6f72672f696d672f6b76726f636b732d66656174757265642e706e67 +keywords: + - kvrocks + - keyvalue + - database +maintainers: + - email: drycc@drycc.cc + name: drycc +name: kvrocks +sources: + - https://github.com/apache/kvrocks +version: "2.10" diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/README.md b/addons/kvrocks/2.10/chart/kvrocks-2.10/README.md new file mode 100644 index 00000000..025c1922 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/README.md @@ -0,0 +1,557 @@ + + +# Kvrocks packaged by drycc + +Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol.
Kvrocks intends to decrease the cost of memory and increase the capacity while compared to Redis. The design of replication and storage was inspired by rocksplicator and blackwidow. + +[Overview of Kvrocks](https://kvrocks.apache.org/) + +## Introduction + +This chart bootstraps a [kvrocks](https://github.com/drycc-addons/addons/tree/main/addons/kvrocks/2.10) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + + +### Choose between Kvrocks Helm Chart and Kvrocks Cluster Helm Chart + +You can choose either of the two Kvrocks Helm charts for deploying a Kvrocks cluster. + +1. [Kvrocks Helm Chart](https://github.com/drycc-addons/addons/tree/main/addons/kvrocks) will deploy a master-slave cluster, with the [option](https://github.com/drycc-addons/addons/tree/main/addons/kvrocks#sentinel-configuration-parameters) of enabling Kvrocks Sentinel. +2. [Kvrocks Cluster Helm Chart](https://github.com/drycc-addons/addons/tree/main/addons/kvrocks) will deploy a Kvrocks cluster. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release drycc-addons/kvrocks +``` + +The command deploys Kvrocks on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release.
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | -------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.kvrocks.password` | Global Kvrocks; password (overrides `auth.password`) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `secretAnnotations` | Annotations to add to secret | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### Kvrocks; Image parameters + +| Name | Description | Value | +| ------------------- | ------------------------------------------------------- | ---------------------- | +| `image.registry` | Kvrocks; image registry | `docker.io` | +| `image.repository` | Kvrocks; image repository | `bitnami/kvrocks` | +| `image.tag` | Kvrocks; image tag (immutable tags are recommended) | 
`6.2.6-debian-10-r169` | +| `image.pullPolicy` | Kvrocks; image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Kvrocks; image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | + + +### Kvrocks; common configuration parameters + +| Name | Description | Value | +| -------------------------------- | --------------------------------------------------------------------------------------- | ------------- | +| `architecture` | Kvrocks; architecture. Allowed values: `standalone` or `replication` | `replication` | +| `auth.enabled` | Enable password authentication | `true` | +| `auth.sentinel` | Enable password authentication on sentinels too | `true` | +| `auth.password` | Kvrocks; password | `""` | +| `auth.existingSecret` | The name of an existing secret with Kvrocks; credentials | `""` | +| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `""` | +| `commonConfiguration` | Common configuration to be added into the ConfigMap | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Kvrocks; nodes | `""` | + + +### Kvrocks; master configuration parameters + +| Name | Description | Value | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------- | ------------------------ | +| `master.configuration` | Configuration for Kvrocks; master nodes | `""` | +| `master.disableCommands` | Array with Kvrocks; commands to disable on master nodes | `["FLUSHDB","FLUSHALL"]` | +| `master.command` | Override default container command (useful when using custom images) | `[]` | +| `master.args` | Override default container args (useful when using custom images) | `[]` | +| `master.preExecCmds` | Additional commands to run prior to starting Kvrocks; master | `[]` | +| `master.extraFlags` | Array with additional command line flags for Kvrocks; master | `[]` | +| `master.extraEnvVars` | 
Array with extra environment variables to add to Kvrocks; master nodes | `[]` | +| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Kvrocks; master nodes | `""` | +| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Kvrocks; master nodes | `""` | +| `master.containerPorts.kvrocks` | Container port to open on Kvrocks; master nodes | `6379` | +| `master.startupProbe.enabled` | Enable startupProbe on Kvrocks; master nodes | `false` | +| `master.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `20` | +| `master.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `master.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `master.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `master.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `master.livenessProbe.enabled` | Enable livenessProbe on Kvrocks; master nodes | `true` | +| `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `master.readinessProbe.enabled` | Enable readinessProbe on Kvrocks; master nodes | `true` | +| `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `master.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `master.readinessProbe.successThreshold` | Success threshold for 
readinessProbe | `1` | +| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `master.resources.limits` | The resources limits for the Kvrocks; master containers | `{}` | +| `master.resources.requests` | The requested resources for the Kvrocks; master containers | `{}` | +| `master.podSecurityContext.enabled` | Enabled Kvrocks; master pods' Security Context | `true` | +| `master.podSecurityContext.fsGroup` | Set Kvrocks; master pod's Security Context fsGroup | `1001` | +| `master.containerSecurityContext.enabled` | Enabled Kvrocks; master containers' Security Context | `true` | +| `master.containerSecurityContext.runAsUser` | Set Kvrocks; master containers' Security Context runAsUser | `1001` | +| `master.kind` | Use either Deployment or StatefulSet (default) | `StatefulSet` | +| `master.schedulerName` | Alternate scheduler for Kvrocks; master pods | `""` | +| `master.updateStrategy.type` | Kvrocks; master statefulset strategy type | `RollingUpdate` | +| `master.priorityClassName` | Kvrocks; master pods' priorityClassName | `""` | +| `master.hostAliases` | Kvrocks; master pods host aliases | `[]` | +| `master.podLabels` | Extra labels for Kvrocks; master pods | `{}` | +| `master.podAnnotations` | Annotations for Kvrocks; master pods | `{}` | +| `master.shareProcessNamespace` | Share a single process namespace between all of the containers in Kvrocks; master pods | `false` | +| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `master.nodeAffinityPreset.type` | Node affinity preset type. 
Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` | +| `master.nodeAffinityPreset.values` | Node label values to match. Ignored if `master.affinity` is set | `[]` | +| `master.affinity` | Affinity for Kvrocks; master pods assignment | `{}` | +| `master.nodeSelector` | Node labels for Kvrocks; master pods assignment | `{}` | +| `master.tolerations` | Tolerations for Kvrocks; master pods assignment | `[]` | +| `master.topologySpreadConstraints` | Spread Constraints for Kvrocks; master pod assignment | `[]` | +| `master.dnsPolicy` | DNS Policy for Kvrocks; master pod | `""` | +| `master.dnsConfig` | DNS Configuration for Kvrocks; master pod | `{}` | +| `master.lifecycleHooks` | for the Kvrocks; master container(s) to automate configuration before or after startup | `{}` | +| `master.extraVolumes` | Optionally specify extra list of additional volumes for the Kvrocks; master pod(s) | `[]` | +| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kvrocks; master container(s) | `[]` | +| `master.sidecars` | Add additional sidecar containers to the Kvrocks; master pod(s) | `[]` | +| `master.initContainers` | Add additional init containers to the Kvrocks; master pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence on Kvrocks; master nodes using Persistent Volume Claims | `true` | +| `master.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `master.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. 
| `""` | +| `master.persistence.path` | The path the volume will be mounted at on Kvrocks; master containers | `/data` | +| `master.persistence.subPath` | The subdirectory of the volume to mount on Kvrocks; master containers | `""` | +| `master.persistence.storageClass` | Persistent Volume storage class | `""` | +| `master.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume size | `8Gi` | +| `master.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `master.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `master.persistence.dataSource` | Custom PVC data source | `{}` | +| `master.persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `""` | +| `master.service.type` | Kvrocks; master service type | `ClusterIP` | +| `master.service.ports.kvrocks` | Kvrocks; master service port | `6379` | +| `master.service.nodePorts.kvrocks` | Node port for Kvrocks; master | `""` | +| `master.service.externalTrafficPolicy` | Kvrocks; master service external traffic policy | `Cluster` | +| `master.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `master.service.clusterIP` | Kvrocks; master service Cluster IP | `""` | +| `master.service.loadBalancerIP` | Kvrocks; master service Load Balancer IP | `""` | +| `master.service.loadBalancerSourceRanges` | Kvrocks; master service Load Balancer sources | `[]` | +| `master.service.annotations` | Additional custom annotations for Kvrocks; master service | `{}` | +| `master.terminationGracePeriodSeconds` | Integer setting the termination grace period for the kvrocks-master pods | `30` | + + +### Kvrocks; replicas configuration parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------- | 
------------------------ | +| `replica.replicaCount` | Number of Kvrocks; replicas to deploy | `3` | +| `replica.configuration` | Configuration for Kvrocks; replicas nodes | `""` | +| `replica.disableCommands` | Array with Kvrocks; commands to disable on replicas nodes | `["FLUSHDB","FLUSHALL"]` | +| `replica.command` | Override default container command (useful when using custom images) | `[]` | +| `replica.args` | Override default container args (useful when using custom images) | `[]` | +| `replica.preExecCmds` | Additional commands to run prior to starting Kvrocks; replicas | `[]` | +| `replica.extraFlags` | Array with additional command line flags for Kvrocks; replicas | `[]` | +| `replica.extraEnvVars` | Array with extra environment variables to add to Kvrocks; replicas nodes | `[]` | +| `replica.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Kvrocks; replicas nodes | `""` | +| `replica.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Kvrocks; replicas nodes | `""` | +| `replica.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `replica.externalMaster.host` | External master host to bootstrap from | `""` | +| `replica.externalMaster.port` | Port for kvrocks service external master host | `6379` | +| `replica.containerPorts.kvrocks` | Container port to open on Kvrocks; replicas nodes | `6379` | +| `replica.startupProbe.enabled` | Enable startupProbe on Kvrocks; replicas nodes | `true` | +| `replica.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `replica.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `replica.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `replica.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `replica.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `replica.livenessProbe.enabled` | Enable livenessProbe on 
Kvrocks; replicas nodes | `true` | +| `replica.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `replica.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `replica.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `replica.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `replica.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `replica.readinessProbe.enabled` | Enable readinessProbe on Kvrocks; replicas nodes | `true` | +| `replica.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `replica.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `replica.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `replica.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `replica.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `replica.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `replica.resources.limits` | The resources limits for the Kvrocks; replicas containers | `{}` | +| `replica.resources.requests` | The requested resources for the Kvrocks; replicas containers | `{}` | +| `replica.podSecurityContext.enabled` | Enabled Kvrocks; replicas pods' Security Context | `true` | +| `replica.podSecurityContext.fsGroup` | Set Kvrocks; replicas pod's Security Context fsGroup | `1001` | +| `replica.containerSecurityContext.enabled` | Enabled Kvrocks; replicas containers' Security Context | `true` | +| `replica.containerSecurityContext.runAsUser` | Set Kvrocks; replicas containers' Security Context runAsUser | `1001` | +| 
`replica.schedulerName` | Alternate scheduler for Kvrocks; replicas pods | `""` | +| `replica.updateStrategy.type` | Kvrocks; replicas statefulset strategy type | `RollingUpdate` | +| `replica.priorityClassName` | Kvrocks; replicas pods' priorityClassName | `""` | +| `replica.podManagementPolicy` | podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods | `""` | +| `replica.hostAliases` | Kvrocks; replicas pods host aliases | `[]` | +| `replica.podLabels` | Extra labels for Kvrocks; replicas pods | `{}` | +| `replica.podAnnotations` | Annotations for Kvrocks; replicas pods | `{}` | +| `replica.shareProcessNamespace` | Share a single process namespace between all of the containers in Kvrocks; replicas pods | `false` | +| `replica.podAffinityPreset` | Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `replica.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.nodeAffinityPreset.key` | Node label key to match. Ignored if `replica.affinity` is set | `""` | +| `replica.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `replica.affinity` is set | `[]` | +| `replica.affinity` | Affinity for Kvrocks; replicas pods assignment | `{}` | +| `replica.nodeSelector` | Node labels for Kvrocks; replicas pods assignment | `{}` | +| `replica.tolerations` | Tolerations for Kvrocks; replicas pods assignment | `[]` | +| `replica.topologySpreadConstraints` | Spread Constraints for Kvrocks; replicas pod assignment | `[]` | +| `replica.dnsPolicy` | DNS Policy for Kvrocks; replica pods | `""` | +| `replica.dnsConfig` | DNS Configuration for Kvrocks; replica pods | `{}` | +| `replica.lifecycleHooks` | for the Kvrocks; replica container(s) to automate configuration before or after startup | `{}` | +| `replica.extraVolumes` | Optionally specify extra list of additional volumes for the Kvrocks; replicas pod(s) | `[]` | +| `replica.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kvrocks; replicas container(s) | `[]` | +| `replica.sidecars` | Add additional sidecar containers to the Kvrocks; replicas pod(s) | `[]` | +| `replica.initContainers` | Add additional init containers to the Kvrocks; replicas pod(s) | `[]` | +| `replica.persistence.enabled` | Enable persistence on Kvrocks; replicas nodes using Persistent Volume Claims | `true` | +| `replica.persistence.medium` | Provide a medium for `emptyDir` volumes. 
| `""` | +| `replica.persistence.path` | The path the volume will be mounted at on Kvrocks; replicas containers | `/data` | +| `replica.persistence.subPath` | The subdirectory of the volume to mount on Kvrocks; replicas containers | `""` | +| `replica.persistence.storageClass` | Persistent Volume storage class | `""` | +| `replica.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `replica.persistence.size` | Persistent Volume size | `8Gi` | +| `replica.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `replica.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `replica.persistence.dataSource` | Custom PVC data source | `{}` | +| `replica.service.type` | Kvrocks; replicas service type | `ClusterIP` | +| `replica.service.ports.kvrocks` | Kvrocks; replicas service port | `6379` | +| `replica.service.nodePorts.kvrocks` | Node port for Kvrocks; replicas | `""` | +| `replica.service.externalTrafficPolicy` | Kvrocks; replicas service external traffic policy | `Cluster` | +| `replica.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `replica.service.clusterIP` | Kvrocks; replicas service Cluster IP | `""` | +| `replica.service.loadBalancerIP` | Kvrocks; replicas service Load Balancer IP | `""` | +| `replica.service.loadBalancerSourceRanges` | Kvrocks; replicas service Load Balancer sources | `[]` | +| `replica.service.annotations` | Additional custom annotations for Kvrocks; replicas service | `{}` | +| `replica.terminationGracePeriodSeconds` | Integer setting the termination grace period for the kvrocks-replicas pods | `30` | +| `replica.autoscaling.enabled` | Enable replica autoscaling settings | `false` | +| `replica.autoscaling.minReplicas` | Minimum replicas for the pod autoscaling | `1` | +| `replica.autoscaling.maxReplicas` | Maximum replicas for the pod autoscaling | `11` | +| `replica.autoscaling.targetCPU` | Percentage of CPU to 
consider when autoscaling | `""` | +| `replica.autoscaling.targetMemory` | Percentage of Memory to consider when autoscaling | `""` | + + +### Kvrocks with Sentinel configuration parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `sentinel.enabled` | Use Kvrocks; Sentinel on Kvrocks; pods. | `false` | +| `sentinel.image.registry` | Kvrocks; Sentinel image registry | `docker.io` | +| `sentinel.image.repository` | Kvrocks; Sentinel image repository | `bitnami/sentinel` | +| `sentinel.image.tag` | Kvrocks; Sentinel image tag (immutable tags are recommended) | `6.2.6-debian-10-r167` | +| `sentinel.image.pullPolicy` | Kvrocks; Sentinel image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Kvrocks; Sentinel image pull secrets | `[]` | +| `sentinel.image.debug` | Enable image debug mode | `false` | +| `sentinel.masterSet` | Master set name | `mymaster` | +| `sentinel.quorum` | Sentinel Quorum | `2` | +| `sentinel.getMasterTimeout` | Amount of time to allow before get_sentinel_master_info() times out. | `220` | +| `sentinel.automateClusterRecovery` | Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. 
| `false` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Kvrocks; node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of replicas that can be reconfigured in parallel to use the new master after a failover | `1` | +| `sentinel.configuration` | Configuration for Kvrocks; Sentinel nodes | `""` | +| `sentinel.command` | Override default container command (useful when using custom images) | `[]` | +| `sentinel.args` | Override default container args (useful when using custom images) | `[]` | +| `sentinel.preExecCmds` | Additional commands to run prior to starting Kvrocks; Sentinel | `[]` | +| `sentinel.extraEnvVars` | Array with extra environment variables to add to Kvrocks; Sentinel nodes | `[]` | +| `sentinel.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Kvrocks; Sentinel nodes | `""` | +| `sentinel.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Kvrocks; Sentinel nodes | `""` | +| `sentinel.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `sentinel.externalMaster.host` | External master host to bootstrap from | `""` | +| `sentinel.externalMaster.port` | Port for kvrocks service external master host | `6379` | +| `sentinel.containerPorts.sentinel` | Container port to open on Kvrocks; Sentinel nodes | `26379` | +| `sentinel.startupProbe.enabled` | Enable startupProbe on Kvrocks; Sentinel nodes | `true` | +| `sentinel.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `sentinel.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `sentinel.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `sentinel.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `sentinel.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| 
`sentinel.livenessProbe.enabled` | Enable livenessProbe on Kvrocks; Sentinel nodes | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `sentinel.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `sentinel.readinessProbe.enabled` | Enable readinessProbe on Kvrocks; Sentinel nodes | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `sentinel.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `sentinel.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `sentinel.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `sentinel.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `sentinel.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `sentinel.persistence.enabled` | Enable persistence on Kvrocks; sentinel nodes using Persistent Volume Claims (Experimental) | `false` | +| `sentinel.persistence.storageClass` | Persistent Volume storage class | `""` | +| `sentinel.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `sentinel.persistence.size` | Persistent Volume size | `100Mi` | +| `sentinel.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `sentinel.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `sentinel.persistence.dataSource` | 
Custom PVC data source | `{}` | +| `sentinel.resources.limits` | The resources limits for the Kvrocks; Sentinel containers | `{}` | +| `sentinel.resources.requests` | The requested resources for the Kvrocks; Sentinel containers | `{}` | +| `sentinel.containerSecurityContext.enabled` | Enabled Kvrocks; Sentinel containers' Security Context | `true` | +| `sentinel.containerSecurityContext.runAsUser` | Set Kvrocks; Sentinel containers' Security Context runAsUser | `1001` | +| `sentinel.lifecycleHooks` | for the Kvrocks; sentinel container(s) to automate configuration before or after startup | `{}` | +| `sentinel.extraVolumes` | Optionally specify extra list of additional volumes for the Kvrocks; Sentinel | `[]` | +| `sentinel.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kvrocks; Sentinel container(s) | `[]` | +| `sentinel.service.type` | Kvrocks; Sentinel service type | `ClusterIP` | +| `sentinel.service.ports.kvrocks` | Kvrocks; service port for Kvrocks; | `6379` | +| `sentinel.service.ports.sentinel` | Kvrocks; service port for Kvrocks; Sentinel | `26379` | +| `sentinel.service.nodePorts.kvrocks` | Node port for Kvrocks; | `""` | +| `sentinel.service.nodePorts.sentinel` | Node port for Sentinel | `""` | +| `sentinel.service.externalTrafficPolicy` | Kvrocks; Sentinel service external traffic policy | `Cluster` | +| `sentinel.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `sentinel.service.clusterIP` | Kvrocks; Sentinel service Cluster IP | `""` | +| `sentinel.service.loadBalancerIP` | Kvrocks; Sentinel service Load Balancer IP | `""` | +| `sentinel.service.loadBalancerSourceRanges` | Kvrocks; Sentinel service Load Balancer sources | `[]` | +| `sentinel.service.annotations` | Additional custom annotations for Kvrocks; Sentinel service | `{}` | +| `sentinel.terminationGracePeriodSeconds` | Integer setting the termination grace period for the kvrocks-node pods | `30` | + + +### 
Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `podSecurityPolicy.enabled` | Enable PodSecurityPolicy's RBAC rules | `false` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` | +| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `""` | + + +### Metrics Parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------ | ------------------------ | +| `metrics.enabled` | Start a sidecar prometheus exporter to expose Kvrocks; metrics | `false` | +| `metrics.image.registry` | Kvrocks; Exporter image registry | `docker.io` | +| `metrics.image.repository` | Kvrocks; Exporter image repository | `bitnami/kvrocks-exporter` | +| `metrics.image.tag` | Kvrocks; Kvrocks; Exporter image tag (immutable tags are recommended) | `1.37.0-debian-10-r9` | +| `metrics.image.pullPolicy` | Kvrocks; Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Kvrocks; Exporter image pull secrets | `[]` | +| `metrics.command` | Override default metrics container init command (useful when using custom images) | `[]` | +| `metrics.kvrocksTargetHost` | A way to specify an alternative Kvrocks; hostname | `localhost` | +| `metrics.extraArgs` | Extra arguments for Kvrocks; exporter, for example: | `{}` | +| `metrics.extraEnvVars` | Array with extra environment variables to add to Kvrocks; exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enabled Kvrocks; exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set Kvrocks; exporter containers' Security Context runAsUser | `1001` | +| `metrics.extraVolumes` | Optionally specify extra list of 
additional volumes for the Kvrocks; metrics sidecar | `[]` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kvrocks; metrics sidecar | `[]` | +| `metrics.resources.limits` | The resources limits for the Kvrocks; exporter container | `{}` | +| `metrics.resources.requests` | The requested resources for the Kvrocks; exporter container | `{}` | +| `metrics.podLabels` | Extra labels for Kvrocks; exporter pods | `{}` | +| `metrics.podAnnotations` | Annotations for Kvrocks; exporter pods | `{}` | +| `metrics.service.type` | Kvrocks; exporter service type | `ClusterIP` | +| `metrics.service.port` | Kvrocks; exporter service port | `9121` | +| `metrics.service.externalTrafficPolicy` | Kvrocks; exporter service external traffic policy | `Cluster` | +| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `metrics.service.loadBalancerIP` | Kvrocks; exporter service Load Balancer IP | `""` | +| `metrics.service.loadBalancerSourceRanges` | Kvrocks; exporter service Load Balancer sources | `[]` | +| `metrics.service.annotations` | Additional custom annotations for Kvrocks; exporter service | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `""` | +| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | The timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Metrics RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | Metrics RelabelConfigs to apply to samples before ingestion. 
| `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` | +| `metrics.prometheusRule.rules` | Custom Prometheus rules | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` | +| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `10-debian-10-r378` | +| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` | +| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` | +| `sysctl.image.registry` | Bitnami Shell image 
registry | `docker.io` | +| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` | +| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `10-debian-10-r378` | +| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` | +| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` | +| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` | +| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctl.resources.limits` | The resources limits for the init container | `{}` | +| `sysctl.resources.requests` | The requested resources for the init container | `{}` | + + +### useExternalDNS Parameters + +| Name | Description | Value | +| -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `useExternalDNS.enabled` | Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. | `false` | +| `useExternalDNS.additionalAnnotations` | Extra annotations to be utilized when `external-dns` is enabled. | `{}` | +| `useExternalDNS.annotationKey` | The annotation key utilized when `external-dns` is enabled. | `external-dns.alpha.kubernetes.io/` | +| `useExternalDNS.suffix` | The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. | `""` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.password=secretpassword \ + bitnami/kvrocks +``` + +The above command sets the Kvrocks; server password to `secretpassword`. 
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/kvrocks +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +#### Default: Master-Replicas + +When installing the chart with `architecture=replication`, it will deploy a Kvrocks; master StatefulSet (only one master node allowed) and a Kvrocks; replicas StatefulSet. The replicas will be read-replicas of the master. Two services will be exposed: + +- Kvrocks; Master service: Points to the master, where read-write operations can be performed +- Kvrocks; Replicas service: Points to the replicas, where only read operations are allowed. + +In case the master crashes, the replicas will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Standalone + +When installing the chart with `architecture=standalone`, it will deploy a standalone Kvrocks; StatefulSet (only one node allowed). A single service will be exposed: + +- Kvrocks; Master service: Points to the master, where read-write operations can be performed + +#### Master-Replicas with Sentinel + +When installing the chart with `architecture=replication` and `sentinel.enabled=true`, it will deploy a Kvrocks; master StatefulSet (only one master allowed) and a Kvrocks; replicas StatefulSet. In this case, the pods will contain an extra container with Kvrocks; Sentinel. This container will form a cluster of Kvrocks; Sentinel nodes, which will promote a new master in case the actual one fails. 
In addition to this, only one service is exposed: + +- Kvrocks; service: Exposes port 6379 for Kvrocks; read-only operations and port 26379 for accessing Kvrocks; Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Kvrocks; Sentinel cluster and query the current master using the command below (using redis-cli or similar): + +``` +SENTINEL get-master-addr-by-name +``` + +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Host Kernel Settings + +Kvrocks; may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. + +Refer to the chart documentation for more information on [configuring host kernel settings with an example](https://docs.bitnami.com/kubernetes/infrastructure/kvrocks/administration/configure-kernel-settings/). + +## Persistence + +By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. 
Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set master.persistence.existingClaim=PVC_NAME bitnami/kvrocks +``` + +## Backup and restore + +Refer to the chart documentation for more information on [backing up and restoring Kvrocks; deployments](https://docs.bitnami.com/kubernetes/infrastructure/kvrocks/administration/backup-restore/). + +## NetworkPolicy + +To enable network policy for Kvrocks;, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +Refer to the chart documenation for more information on [enabling the network policy in Kvrocks; deployments](https://docs.bitnami.com/kubernetes/infrastructure/kvrocks/administration/enable-network-policy/). + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. 
\ No newline at end of file diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/extra-flags-values.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/extra-flags-values.yaml new file mode 100644 index 00000000..8c1dcefb --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/extra-flags-values.yaml @@ -0,0 +1,12 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +replica: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +auth: + enabled: false diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/sentinel-values.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/sentinel-values.yaml new file mode 100644 index 00000000..48dfa1d4 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/sentinel-values.yaml @@ -0,0 +1,6 @@ +sentinel: + enabled: true +metrics: + enabled: true + sentinel: + enabled: true diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/standalone-values.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/standalone-values.yaml new file mode 100644 index 00000000..dfef688c --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/ci/standalone-values.yaml @@ -0,0 +1 @@ +architecture: standalone diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/img/redis-cluster-topology.png b/addons/kvrocks/2.10/chart/kvrocks-2.10/img/redis-cluster-topology.png new file mode 100644 index 0000000000000000000000000000000000000000..f0a02a9f8835381302731c9cb000b2835a45e7c9 GIT binary patch literal 11448 zcmeI2cUx22x9_8Zh@yyC0I4cPR3wC|kVp$H5PB1ZB#;m~gf1}_1O)^|QB*Y2#4gyt zMlbeAQxFvp712m>hX{m(GZyaOx!ZkSz`gf*j(>!>GS^(QjWNFCGu9zzC!56!6&9jU zsKs`+R<0=2Tv-%qj{Ji8aAni0vR)KQEHl>HJ2pI#N)HP{sbegEe^b}f4US~Qs$;Cw z_4G(lQ96Ni5-o-l&d`YniiJz?dw66Zok|Z1{M|-RS5J47uKp%8#vQGzjxpCah7XLM zj-j5O@9*{`T2RE_9UAE9LI+xoBnmwuHj)v%{&$O@SQ71bZ+Kl2DH#*!W~guJ6YNcK zHZTeO`>F9kF${WS#P4QkJslGrH2U}5u}M)uzb^*{#nUN4$W@Fr%;@i-!xQO$57ytD z8g7NACsAo=gGf@4U2vq4|L;yBNa25X;tb>6G}|@C+Q2i|iEP41uyWQ#JBJ%4#8?F< 
zhJ?qHVxj_RUKr=dpkNn9ac?y zjjf4Zb6GNz?1!C-z2!T|`I6FNrJxgzEdNh&X5<(5KrG*81T6sFq zW4#j0(FP$=R0ktuGQKhJ>5aE1x<}i)7{Yf-uoVV+b&Uyx|9F@?Q)9v%9i!kom8xr_ zZyDi8quLlJ$HoMDMv5Etu;N25ccDbf0|Q5Hey zuy9hmyN{lmxi_9+6lmk*8e~ZKun3K`af^yY(`;?=K0&@@N^D4|ZmdgqQerIL%hrxy zM`KVuoedc@Cs*&NU@z}TC!YjoOLVwfgp0XBq^=QOH`LfZDk)yy$}P!&oMd9pNV2eV za19GKbTn|V3?&(oC>TsM-pj)_-XqzO!f>`XcTI{5(F-#P4|eb+*@i_q#RWz9Cb}lU zY#Dly5oBMwdxVb>J|ZxR=Hn2B(esIp*C%@g#keGsV#(gFfsDuqU)z`@qaaF%b3~+m zaCivCJ1!{#Z-OS0%&mNabUj@xDKwK12OC|CUMSr<(LBO6#=*zKmh2FMcXPB0wAZ(G zOLT#y3^MdVd+FN4H&{DMs+$|$6}}_mun)3ByGD6M!*@NGc#37RH*(}cY~w5(g7s}^ zzJ{deP$OjHtPIHRcv})F*daVJ9OLS0Leg`Niu1-BQNyAVy)cG$-t^cQdT5*_#!w#< z6bFATlk}s>LCIl6HxqO;(KpT^F4{I2o&%lvSS5MJQIi?*1e?StBg+V5BGsE18|WMZ zW@PMb;;rjJh)Z(TCxklSlZ>ctaW=Yo)I?_w8+1I}9FDPYfEk{Rp z-ILpbFFK4J$#eW+T6n?W^l5qJZKu}h5eQ&HLB6NEPly?kFMd+9B*;21U%BtZEs%?pK-aOJA&_a1j0}?4#_1KVkJ~ zb1N$=!~T5LHEY(Ki6ShOkvZyUtN}kg)=-p%p8{pGCE=%=k}YB~GBVw%{}0_J=Dl%c zNm&_1!2faZ@ZlzI_bqfh*IP+9=vgI}rUzo0%pyLDCQ>#KFyN?VT{J-WBp zCg)?vCAY`Vo>`JeY8zKB+H+FMc}Q(BfZHZ_r67MSRsc_F7C zywt+dvh}JDW@2iJ{P>7kKAJK=H#hgl*|VFbw_3cM9SuWCQ8xa>xpUb;&rXoK%1McR zdnJ}CDoP!Dk~;a<;97Nc@fy+86}OQmPoC7l)OA^+lzf7Q)M`ViYrc0ZqM~%M)1#Jw zfq};3$}t=HHo0Bw?;rEaU97FebB`UnQf>OC*hZRgcFPL?lreG9^k2lGzd~ZiK3a#p zy)Aj;;m?Kl6GpPH?|86AUw`=-!|+_};cth8N7wOf@{BbUz8)iD>yD^dMsdu3sjgcm zW@l$t760Qj&u4MW_&1NM{x!;Vb#<@w7h9$;X)0sy)@H8k+&6o3!pHoJ>1>urexUjA z{*xzH?Mxn059_{ZJ+&^q>tPZG7Orn-Nb**9*6rK+L&p|ya{?=HmlwBKDP8rsHUCla zcztmWHlwP_c!*J;ZD_dCY!|w?1!4%~4*Dqs@kWrCfWZ=WDk*3@9_9&Rg z5F_^U)4UCdKkmGFRb;&gdi0)Bv?Q|{S_2<>6TK>ZjGG z9{XfuWax&!)fbyTe+?S#0yhM(;#x%99AT%DK(HZO64jEUVl(pL5m;Fr&fwZPdwct( z%F5ZbwcE;A()V1-YtksJyA;;q7Ewg>BpOX=mYSHHB=uG^+sd@U;l8gOmt?7&bDNkM zu3c;-9?ml75dN@2UC14Kwt46Ayu3oYV;XHVV{t*d3K&7wJ2t7s>ig?!V=XEy zTei8u;}z>#O<*5YkYo515Ix`}TT?sxu@^5d`*D}*ybg@r&so3e+v-a$5 zZUo2RH_Wjx%ILh}MVX!$Yom3g3NIn!T+5S?3nT>6XvGdEb=qi07N;DkD{zvz0b5=XcjZ z%dB}4OKjOLo4Fig7=&v@&9~z!vfyUH!r>W8>ohdhn40=cWhIkX&B#nMgdSzQe698C 
z&HwuHqLib$HKeUX^6As3C7g@0e3~gTl8)Z9$CYK#Xmq|lJ(njRXfx1~)AFtJO6k9w zkVqt3A`$kLt7TxY0_>xW)*>D8sh7e;aXnntiCP%~e12HDO9BW)FCB@FA<~X*CewMZ8<>mEH4zkm8a-`u%DeyvRRBF-r^R>#fZ%=tc?G*uabB8b?O`
    gt0u3YK+3VbZ)7Cr=g*&)YecY;+uHRLladT3#L-cG+Z4CN zKHgdOtr;9@>`b#Ba+WJ_{4yh$6P!auoRD{nC(pX_v#ErxkgbB-IsWFP&W_}s%qJmt z*68S5|2A?sI~z}@uitaVzm50v=e`T{#J^q`x%l~|^x)X1PD%ECQ3)6r7}&pZlW#!x z&7i92f%*0h4r6aaZ}j74CZ$tSQjUNV{3&o&kyxTw>qazFXX3Act31{S^Z5WvqP%;8 zRA3x!@p~>={SD~@*D62Me{9N}gIbv>4iVJ*7S*vcbLjhxnzRda8yg$XpFfY95st75=?aFC9}wq0 zF)@LdjmM0RV|#f>r?%h2M3;$#Pa{fO)5_jR(9T%&W!y0{L3|lv0qAhQ-1KmF^PQ#ZP>O;1`%Dz!7D2~MQU ziWTB2Dk_SwVr4}3^O^Dya_Y^8?nk&h01c(e<# zn#xC@6l4*7aQx)S269E;yL${|OE<2}jM=QQ;)KaRrkkog^P>D`le>${Uf8$b)1Kp> zs|yMWF0X0_-=xd4u&0Y3+CqaAK$nqa%A?fkq#lyJoui-Hen2>X}amW4T zsHl&Pl3-Q+5SF_hB#eanpW7uIe*?#A_w<-e)@q~SyPJD|z&6%iOR!QgO3a%bavc#AD>^6l_mA0H!azv|NZlSo-Y>{7yA@;9CP`qRRbUn%6+e)_JqE@(-#Tj($SLZ+CP~bsy z^z=kD2pv1Nx#8&8SRE{FDJOU-zYL2!Y4}F2TC*4mHh4*NbaWfddEQmOd&^+c{tSn~ z1k|3gva*{asc|0Qw8G!sK8kJ7>0&1%@zj--rXaiP*RVBWzVhzUM4V8cKhnFInYN=G zH(rj*4J;8!8EBy5Shg7(^gc>&AhW&&K%fp5%101T+q}K=U2!!ZR+L`2pe;1(yhCD5 zwZLA;2r!@WQK-ns$WLFsz)Bk!GrmY|iT$d|OtY-O3CYmGMWK8=bZ|_=(b>~egE`;B z&DC`utbzl(e>s=PLdF(@$Qo;F>;K4_clR(EnVF@3${I-&7|nq*XIAU@RjD#F9Ja!- z1gAUx{SAmEZGM?Fa-d@lh@@~DD8%M>?%ZkX;BXAvqz+>)+VLR1jdqa4x`}xGPujwJ zjrkWlZqbNgA0GNlNuA6sD9GCRDA|r(xQNS{?CRYFpMIBcClzE-x)A0>$IUe54MLV{ z5YKI7rd=<`4U$-_CQ(64=M>`}9L2G*vBc3jiKCFeFf-JY%#xS?czdS-j34BuI)Bic zdm|o%rQaRgNkv&y6rbx8JJcqgxpGr3w#j*=j^6?ni&d~S!D(vw4hk!~PV_BJXp*+! 
z*WgV`k!x(MK7_Aw8b^7^?KuC1QSW&W@?d1=cj}_1HTsUh>3@C-(>ADeNNAuaxTPcrHRdNQYOyka!)+4~pt_Se?%`LR z;xnbVV}q7fEhKKOH~ewy)Ya|Slm*-y!(D7)Ma_K*%o**nUm81s`t?3R<$}}M49N5# zO;E?-RNO8Q>V&^~b0AxTn{2rwaMM6V%>xSL3uu>2xLgQhlQT=!U!rWP1^I{yYZ~lH zCiu;Dr4yU|b(q->0jx?`SqgmC)WAIw&;orf@uOty$OokjUf03z=V70$*Vfi9CpRq= z6BFxuqRFa7_Xe7xHZ?Q6iq)$~_dm)_vy2nS?9xW&ct6KH?@M@!S2w7Qj z=(&&|);AuKJ$mZYdgjW#!lRHK-s#T|KF`4lq}0^Zf;%qCq)Ex|YizU%>b>pm?R_vc zRSXGr(F4tL*c4;KurAeqW_0EIH*LSGYHF$oXUzJ|^&Mz>+qx9d`YfxIVcpv04@u z7P_>TibzHZ0gUm@th>A0*2$^4z=W}M*)l}U-VX0sxfY*Qo3UcUk-WUm9lIV^fIX2} z*E1>gbxsAwdp~^&w6d{TYiukBsv-@H2jbq;uaTU`k8PiyHdu7U^IHAuYulqIMoAFE z)@W)<+`D%Vl!lF+oh0gfNr@P^e{hY~PEJeofU`lfu=n>@z+N=MaRRq@2)rNV)8nJT z@2b%EV~6$~IkFUaoQa9NiHS*l&Du`{aQhXwH+5(=#3Ug)*1bHtRYpZ6C+V**r214z zInzX4$S#&HU5fJe_fOoltD&J`-itFv$e}!K6uCrNS{k|d=FO@(bLK2qw5Xx4Za)P6 z0|_6JB*5sp?lCk$heZuEFCHT1#U4bGY#KpPcA?gW4GpmOE{Mljcuy6aXct?-SY!Dp z3!^+du-4Tf9pP_A2VlmeeOgNzIH%HHIGOnLW(iBYVtfv6^)iTckbN8)b|_e zoVEKYtaXQkm(nQ~GAVfY2Jx!TxY1AzKg z4W8SD1hSHnb$$=x9jC6&zn46&{rZM~PG4U@@aeQd%k+=!L|h&=GXz-6GC}dyXhKH= z2RD-ik+|;JvqhU9@s`~U>t4gmwFDg}4-1F*+3qmBukUG|ofzK+)Lv&ArD)0Pt3LZd z-u1WIs95{?74S@)R;*r~3HkcRiWX(dl$3`&lRqTHrj?s_W*^34BcsF5WXY98Q$y+qe39R*cE(Z(E%W;ALmX4_goK$fL!PTdlU!K_L zGZ#S${GQ0RvabpPh7{k+h|59y-9z+iMMVWtafpeDDIuauU^oMX>B{wQ?}V2ATkEct z1-3xFh3IQt>T@|OYirQg8@QaLu3rBN++g$_kzWV_bX-8s-S9SA)$!xUm-D{1T24vr z2w=Sg{!3!L9JEK2I^=1iqodoX)C;`zI_trjLECF>-h2{kEhacNi=@1UIHQz$az!l| z8)%lcvZ~uaCu?84c=5SCkAqGC7kmLkoDB}^lYD!N{7&jB&RM& zLL_Grvo2fFWDTItH85H@m+%ZR2)7Vtv%1**&5dUws}CJmvmB zGtS1^z4OMP90a2@w6(J{cXmF)lV1!SAerA&p@-{k0=K79n$nUDi8{9rS~df41@yU! 
zPg8_JTD8h3G71pC{{UNtA{1TRzuwRQ1S%=35%6pHJ^Yjj1%R3w-0SN*GK-4VKt?7H z+!zW;Nl8h@(I3T-s)4h!D)aJ1NHvB;hl3ET%cg*faQRJkm08o#)4X;fbmidWztg>;!#64Q4Gor0j7EW=${<=D*66b5 zujA|8puV!ZCvyl$p>fKDE~*bq0LeZf$BO##)&K#p0HJ_%O}wcQ9Y9h)n8zMKySw-s zXZgX*^dPPxyvOhfvCXxg%sc?h3$;R^(_K(P3*UU9 zmo=l*kS={AxsHB14URAhG(H6XjehrIFNH9@s`H1Z0V^NwC<~Q+g!6yY>m|$gN=Rl^ zC)eE2fUMg+rTS^MrFP;Z-c2n#cY!_uVPeiZze#uwRB7j-`Ucr7 z1d%86&I~B~qfip>kaqztb`AD8{~L0y=zSrD@@I~B`d2Tngq+?T{yoYEXi*LkuBUpH z0H|_0bQSw`DM}TKRR+*p$t3cnann&fH_kwO9H+mvI*@?5h2v`?1mF>%Lx?i#e=%R@ zUH7lahQrdIgA+lKduYc@^~aB7Sa?aW0Ti5oJ6nWrkqTQAxuWOM%e$d(bO`56eWtQ} zAwti*<#NWldf&6c`I?qc=!pDa3fZy)Qd5EY%4(=&UH>3%Ritcc}65Wn3V;U zGj2e~MHl15QJ{w7^`fl(h_e)2ahe3|wfW)bzFeGAc0@N6uhW(&X@lDYn22!P7*?JX zS_5i1OF&7$Q0K$m87KWL`6GbnHRhxCfFD-V(Sg!n$ez3nUTd_q?Wd&9l;h%{m%vmJ zYTjW<6sS6P{@L>DPstVMK}`bn6c@P7#^Y}sX9^jr!V9FNkWys-P#L=!m_#Y3d=bA4 zshnkXunU{T&JRo5MO)*iN!DDV5)vM-K@C$91wg?Pk52;}n%U9eURqOj-w@Jegh-NE zX+Y}we*8ELCNYG$IG^^iz#NJ$PzKfFm-!=I+`%_M==-;pa)8z>^u1K?EVY%s@=l- zy!B2{l-)a99)1PoVKEd^t^nOjr_({?$m#Ja;%i|Y6XALlsj?* z>f2SmNb!5t=79F8f=nm{tq1iagrj;_Mx5oXzuzcQ%oKKJ3r$X3;Sc6QhcC~WWrCv& zXf3jCE0cQ)RgQir2!ajTv5P`MK6d%PFUPe+eHQt}6WlYhv(uo|$me#iWoEQhPkvbq zRiQcH$&UT0Oko%H9MdEgMfO%NSiHCiiXkN&7kd{M8Aw*3GGGEM3_qo)KC5{Der=#X z{~{HX6C8=gD>te$AQ(BYi$kGmp1&nd8(e8RqDB}}dyzL1z;k#dz?_lSAL=Aj`lBCJ~vZaRd#_rNgaC%{d^a1(O*c4V(IEKPIWeu+|! 
z%s41R;Cpis01Q##f4*n&tq)o>%MwlZf?K7)8x|0Q+B8i-h>9Z>i#>Z#rjwtJ+1lD3 z!9G!kDkObY&CdG<*>PCB)+SAnpzcWopr+1YmyVP=wr>GG+}yaX!FnYg^j|U!I@Z5A*1U%+~3V zCPXPnD)TY4C4fAUv>D!~AU7VA@vb0?g9P~_1vT!1n$8ix_3$2(n1nwesZB`Vk~c;^CATva2ZRgf1zp?(eu{DL2oO*VOmYB7 zBSIoS4h$fnf{?E}XTO9r-rHMl?%cV3-@h+{@KL~<7Y!45Ic+v z^)Lto6Ar$AW@U!Y=x^d~2*j3yWE7t4A3^l?BOoM{bpL#lP?Yxy3?WM>=}9Omx&{Tw zdU(5f2D|!)$OaI|a0%}F2YPrDy$K$F+9=8^${&_jKCGaGRZx&n(pFN24@E^;byWqs zKkZ#T2?2ixRFRd30S=kEx_bwZ14Fzd|Fj^GT|NJdW~Fl2&%!mz$V#6`a8_3jwp5Pr z{?jIm5FFwi81Sc=0*o&UkNX`DIWmaw=duSO%-fy7xT=JLH~JeoJkk4au+G{41$dfjkSY4t(D}p^=!R^ z)U|CKrYmJoGw(?iivqLLdoBFF*ll?<2 z{d}S{4I;xL&BN8)(F%r&3f=@&csYzVH1I;Yo0Ig+@#@xYm`Gbo z6;m~mpLZnL*4@I=+EvXe#N5m`(AL<@!_3GsBE%yyGAcA&UeVCaP}fffP1ZH`@bDwr zhD2D}kpm6o6;)Mze6)RWD3Yg@kB^c!$`GxJM4>$4F##cp{%-QFkw)lnTQgk~MI&8P z9Tk10zyNPEcN7YiN#By}ZyI8$7_3L|v~!E_G_ke}4lu`u==l2t+o5oZCgwN=4ABVf zui&QcjmD~akyP<08^&v>n44=yl98cgJ8fdPxxR9gqDi2sKN3SAn}o{~-Th!}D@b7- zBK+@V8i5Z|Ggr5W#Ca+Q!5y+WQr{-X&)36BG1AnE5r%G*B_;-qhuCJ z3XxYc5B0Jokc_ z1Ak8sy+8wFWt6*~nwd6M)r1rorlJ;Lr9cdJSMxSgQB?OfRkKvnLmDd)4PlJ%D3S^z zavWp#D8Z=u|LmCG+Xw#siwR1ok~bZf5C~C(k)Ad-qG*2D7mHoyXI?BnJ}PN*Kx35p zB0Edh32l>qHgUl2g57l&%5g8&D_%rGS=zPh21n3VO3m93Nq4b_7)maAO;F`;?+;dI z-ItZmu3&nclXqOPbM=E;&Mi0Mw-@mX$2tScS4HKOIzK$%kW*?FHp&rSe6q*<=x6aE zqfMNg>J+N6p7{PeDbDP4JY7#u(T%p7vpxBL@S=FafS-O?m}=Pk;F*MkEia0-M7p}V zc-4c&*_k(Q+Pt~zwx>CH=JnS3p_-(c8nsiWPJNi4&-gY!bOoI+J3VmwAhdCn=amm0 zH+=HsiKwjX&TZSa_22d72@Vdnu(y|9yOvYvO3Uev{k8tx(8%cV+qw`=LBVvVhN#Cw zLjofsBfQ6=l+*S1@0E2(-XWms^8M5B)An}lxHwI-iyCKMzs6v(*hW9q&9zRg+}zxU z4j*ozS(RlK7iSg~34VOnAdr%hg0FI6f)VsHg)^$E+^rb`NfeiQbaZvIb8=46=~7+QAA^@?ael-1x%chcmnFW9`5+-`|sGz2%raXVf(`V0i3#DLBQH>AZa&2a2=T~O^{ytu7 z_ntkdHWCdMm|YOL>og08J_eIBjl3EpgIlwDo-b!G)p3ziNGKycoxO{cpb_=;P-$uD ztGW=+n+|mtvLNPAp49N0p}>*sdv4gE+APmW+f;m@<~!-shs{^TtOj?H z3|;Z`HzvWsiitagQf-_fQFqNX>Wm2l31a7^jLVlr{RVEKU_915DJBHl_U#HDPj~6* z>wl@t^21tLolZ_>-LiFS-A|R;t4C#UO^_=i3k#WFzmmHwvGX4upPufy!86%eET_S? 
z?FlB-z0B<5sp4XhVsiFCD!23pXetu_(~``!`vi6N+V0)EUqSA2@$jTkN{!B(Ia51V zQC~$G)9 zYeYmwrckuanlL?8u(o4U#qoQEP6w1;y}D)ie&`;d(?ut!bjv*l&7OQ7YeThN*2=zk zF=KHts7t|+D1+M(wcKMT8s+r`Ah^x8yOs<60_c?`zhDLm8KUmT?H?Qdx5)dH!?D_MFWZI#S$rq1v zB+wAf5s2Dv@Q!zXOEuAN&0x^rX?S5=J_%t|iN3u&ey6>3h?*0=V^7W~8OvepuYS zvHk<{P1I<&Q|vMhYh$B!B)(gK|5{m@Ze`yU$NDe^tnu^n1LmgXJ4J2#wb~hfxo<8z zJA2>w50|v&B#yC>B$0n!H2PB=3kroQb^j@GwmkvTY3y#FQ}dUJ=S&XqN==Q8$Nl=N z9^28KV?J*cH)DPl{V#bM3xnx-yaP3}NgPM4mWxYSY)bJ`M(cy1MRT<`7O_ zS&2B8pFj5K*SZ{Z1c=Sb&E4AIlqrGE!&jnn#BbWy?BtPi;`%AH>U+?v==j3B21pOa zB;M3b+DVT{dILn$>>~eRy6ei~_wNR=vB$oA`NG)Rzt$Jxdojmi*OV2Nl$a1Ul};_= z?PobfL^8X}%yo0bV>VW&u2)vJtu9TGzJ0Vs^!+N3k(S<$a8c%02z+;RZdn)^gotVo z1{PsuXUF#J(aM)~Je8T53HibT`LcEE))dMsv7wqkgPC?Jo;qch5&8iSAKyzjefpBwFgiTI=#4{#8O>d{c3I*NgCZ^R;=lN7vkDzJ4lIuE4zn=j{mtZ1sLs$wt;IpVe=%!g!U@8!+H#$KE(3~X|%7FAQ@$q{#L zN##by{X~*Agp7@i5eV4Hd~3^dNv*B=bB{e__HOSjw`YMB+|DC=`QE*~XAhd|!~FR8 z_+V}5F($YMJYDKqp64&E)$u*PG;18L#Q9MizAjh-arydnR*j20Z7LidKR(S;xIVZ6 z`9qfC6LR8|XW>^5JwXc_wE$g%13l{hqmSaUg!# zjYh}&`Q4OqD)UK$IdQm8ZeP>1$Ki0jow>QWBR~`Kg)d)rbbq~dL*S9w4vw^+7aF3! 
zkFHJ?GeCl|u~BJzRif_|-WkhKd)EjYCZWMRaFR-QW7#R#SmoLb`}iQQq6h2i?(Vw38r1=E|{@#%l6Kh&4Fnyf}Txn9EYy_yX2aTKPy=i-Trn`)RLyG(v zhK7c_{@|1P?E&_{z(9ao{Y#fFy`)T{W&z(%fKUONXTYj9kmY-06sQ9Jpy^lp^iU|^ z0LAml-n%J+>SBPj0Q#Kf$CkM?*S}f;8ZsRT``FlD?GE9vRAW_2tEzTwY-|9`ZT=c{ z%pwhLE@+L(sUPcZ!V|uOw&fy*U)$7l$KXAGX$WZ>8aE~-EzQZxdq{(M z_%cB3^@<9BqoM@rEVGZV@5szd3Zz&vWwaNvnZ+n4Xns%ys5%8Gx%A;fVU3HFHYaZJ z{rfiq)&V~_0LK%VcM2wh{7I&aSeqQ!!@PqZ0ZFKg)XGhCnc65)a zgoMOM$B_j+ZS9lOhYHStf~;_?cLO{ty^@tyjUDbExb4ZgYu5?F^Sl&FsU)adl#7cf z#BvbSMk7YzNlS}%W#3ri$<3u#t`Fur$E64#i*o5IMM3}LGPq5MqN9k1!n+e z_e)6pl}E#Gjx0Pl4U$XfHu0qn3Uy{0nToNtPQ0MxIZ8S5sgaJmq1U$Wj4SPRu^k}) z{qO}!``Lr_>jJN18T%_eU9Ydgu{vtm2Q*m;XfiP%BTeC$MeVN(??rt3D$caHaSF9d zKwei9U-EyQAL8LS_H|dz*|R7Eu`}+p98=Kmy95NbLynjfAvq76H451}^y<|KfM@rn zgl!DbaLv4!^7gGmSGA!8L(+0+{oGYjQsM=YrZt`K^kMsZ*T1!wr>3%rA3V4piR2X) z7UtE8J9^K5=)fO6>rovtWu~X6ciFsHbGo~XgZSe7NsuS3Q=PDH=dT%~(bfJ#swTyn zLf_v-%Q3U_o_PAy@G6y>LeZCM!$=GWX~hYC{`@&&@`C4wxw*7z_ck$QWv=aeWsdv! zNSPHOM_wNVP?x`8!YB|xWi#pyS;zY>5I+7kaR=~nUIjc z$j!yCUlSg?G$wQI-^I;+W_sF-Q5O}e1?m%t2ZBRFEFB!ub8>c{%grqT!P2;VWilVW zf=+8`X?gncB??-p2EI#=T$*%!c1|)mHMI#wk|&Moff5lF6-B@_C8wpGZuih3k*Yr| zuzK^Ma}yZk1x#G+*(odzxtenD(ut%b7Q31N&?2wyhx&q?YM!r!%8uyc zuo|8;&yf(ILG zbEkIiQGtmc`9lgxfZlG4A09InT-@kXem;+WIxneviCsif5Y`&X7x=9Gwef=l3dePQ zZCTdL{EZ;Is5+y(xN@b7j*~ViRNY}~Yg-$$st6kDG*HbIkl*o98z3MS7z00e@W29x zJM-d&38+x+9Pxbz4lu(jg>)U>x5kamb(pQOpGn*yP!RhN*rw)Plr&kxF-^9!t&JuA z$I$zr**7$zh`EW$_%~ynDr~z%z`fjeCT3=4Y1{7_!7$my#qr&pq-Y5n$oTG`J=U{6 zZ=Kq?@pSR}^z`(-{?yW-XbDd@T8>P>TVoc0ml$b($$(06wLB?xy%~FNG<2~9y^?ivgWfk=c}B{*6FA%m-FDSxA0+prtm z^D;;}bVoqtr!+v&f_txje5%j1ry<@t4R1%|n&awoi9}*5WxThx;XddaW|A~cGoW&? 
z1=Ev!tsI*p9zd09n#V;0eYcfkkJL8!fHpo;o9iP`V-pHWg|!Cry?GuNi>I@NL)jTX zeKL#_phZ?zR%7`&!5sMK_k~k$ zYjgQC^aH5k+XE^`tFeyh_L6mMH8u)(x-2x*8~rwGCxbgUYHe)XJm0g5rz--(;cu6p zu$PqNtg)GLr9B&OEk7rKplz$MiLAy7L8m(jLpP4%>0w@L?H%w}>d=jKJpJ(3q4IND zE$u=<(RYQ=%W^@|689-XzRzn-X$3EdP$akmBmy%x{#;#R6ua5`4Vql!Mdjsp9gF^^ z2Ws%u!`Nsj;l@7IFC@a&3#gx0Wmq4KyFdlthv-uTQ()163ZOn`f}&T)%xou+7wSD$ ztZ+b4q-How;ONn#LEneNo59@xtelE@d;BbQZT(KuMOI#>eqi_xITRU!GkQTtEHg~dIyyQo^1M~xbpWnHeGnJqeQ9*m?c>|JJs|260Rn4DW3Omd2LTs<>rFE= z3VJCkdMu_kPYQjvx*CX~ea*N11k+}D-(H!RpP$f>Muwe6qm^FCZVl3e(v24I?g1Y}sy)|R&dJZ83RVpkhfARl@^9Ewy1_-(ZQD+Y z8}S|qlhIuHuo-x!1u63E@(RXZgEz&jtoe4QW*Q(CUtGk&nc_tTi8AVxt5?N=7*E1@7dcvTXMu>j!0pg8FxbC; zzb;v@gd%(_6|^|{(_22M#<+x%xK1p6mZo+jxfw6d*Sv2nuX&dWf?;g7=LRtNvs`JL z#n))L5=I^NFs2Tu9x*kRDe147kni(vGlYX)|9_Yv!Au-sF>I$ZEbDY_>6sZ%u~&ArBk|wG!P{`p!p2V&BJGweO`jP?z&+k2NdN@ z=E~um(iJY9Y^GA9X5UDztgcGVY7-9QtKGK#TwnhTHB2f+XXl3)WYUePW&*V!E9(Rh zM`>+s|4iQm1soYtpJTY|SZfBT8X=@e$HcSMAR`)VU)~J;GA9;CHNrElKUz}tLrg72 zda`+jI#?6v9J4l4W>z1w)&K?oJFon4GP@5`N79^nv}Tbtop_s1NP~PlAfV!7zG?j7 zBS!?FE`X}0^vWw!$b{g@7$g<@lBo2cp~2GH`oP>oj=!u!EyCQ~{GqskE3DCno2yw3 zSRg-!d>op{9^15OlZCAu4ajB++Su5z$jQldDX7P_fSLuh;v5nU$7R4FWaZ%C5o$h6 z(mZEgv6@B2Izz=R<9Pqgi2QgAI#1>?6lHqg4i#}MXSr(%%J#(m_~yE@II{DMXkXOV zE`!fKHqI&FDN=`B<6Z?!;)(_jq=xYc{#Oz!Vrzt z4GIEprQpe`g;w-ba|OoK+}wanzVp&X|Jl;pkj|0Ck>*l&FSaS&BOfnt)HBLQQ3;8b zYfD**{sXtRa&mJ1_iiEj?jINCu8M%&y?==bKj{lmCfs zt#LEQhx&zwA>xJ4xp{c(7eq}#J}dYSDucg}>*=2i0xT(o|>K{kpItU_V5C|MX{qks>ED zsDof~;xw~jKaZk*{&@c@(X8zD#<`2?AqQsX<_d?s!K+|IfoKE|SRwL@2iPB1=yW!) 
zR%T{qs_-%qqbUa$flEq3k>!Ywd>puqII*@C^>a+*m&L@7TIc)ZZQxaxx?ks?I=#fy zkEa8^QO0+d-oMYEFXw!3qyE7IL7)j(5cqugNm*z>w3k-!Fbqu-7TqOUmNyjq|z zNO_NS?;>by79U8MmPx^Gr~-4OTrxk1vjtDydRRPY{K6EMscA0PjwuTSkqEzl$Y zJ-h;(7J$EfvxWra8@?e*1;MaFfKS_^7aEeFG>4UhLj#7-4pIT?)WiY5R!|E;FeCZz z%?wCZp@NLqYahGQ0ZkxMY3rNEq1FW}=HbSgFRU*>f=Ye2$pyu&fe%*?fMABIp?qp{ z+aN`v(R5N#qXKd5PhsHEh!sD_6FzTY0MP8JgL>j}^ITB>#Li~07R zRK(H2;^IzgZEdB03XL#@sy7jI$OyopzQ6yxNSq^NC1WmQolBr@-Rt-bTu>L)Lc;So zA8;yLmO8cMes8{wm0QE!ntoC}V5Ydfdk{&19UcXS%3`m5U<#$gr-)w@netuj+x;;pdLBjKx{+OdZa0uW*APf{{ za2?khGuHi;s2zGJ@RaVc@$($E#@vXe z=4OzFV~Z16OCG;{no~i4NUR*oblJWp;E<$bHiHAfpw!M3J_ao2Tf{Mu69OEGLo9yz zqT}dz(J$YCa)U}y z|MqaEnTt3GLA?x;KRLHhalLyc!p{h7GR0SSeQmlFraAspoDzB)_hX*XKuSsqJf(BDZYjPfQr{E)mc(LRZYQC!b6ex; zio0JunJ}i!|Hdq&O z5Dtc3m6?mcerHi4zGyTMiA`Xz7A4FIxVrzIzW~F60?g*w%*+l)KjN=nzt%I@zhg8e z`N|-sp$ps;#`%EfrAX$74<9n>2ylU_T^?_`dhOcilaxKOFRd&r5Mcal-M&{8tfR+| zA5XugZa-jh{siEysGM9UHIh<^>4V=Bg))8%@U(%X=>SF4nmwGI0JnmNqYDSaupM@Q zR%M(P^D1nDl8wPmU}YqkYp!m&aNz>O5h9Tkz+JZh1FxWfAJLb|<^*5CLIUNmAGilv zt&E>FXE?PF0KotMj!{=7h;wQ?rr`omD@a$t=x-mjF__(up9$XG*G6W_iuIu&TKhJ? zT}w-gJlDH}p}=8Fb8>MdfCX0yx(ZaV0LaQ$Am5=B9b -- bash + +In order to replicate the container startup scripts execute this command: + +For Kvrocks: + + /opt/drycc/scripts/kvrocks/entrypoint.sh /opt/drycc/scripts/kvrocks/run.sh + +{{- if .Values.sentinel.enabled }} + +For Sentinel: + + /opt/drycc/scripts/sentinel/entrypoint.sh /opt/drycc/scripts/sentinel/run.sh + +{{- end }} +{{- else }} + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "auth.enabled=false" you have + most likely exposed the Kvrocks; service externally without any authentication + mechanism. 
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As alternative, you can also switch to "auth.enabled=true"
+    providing a valid password on "password" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- if eq .Values.architecture "replication" }}
+{{- if .Values.sentinel.enabled }}
+
+Kvrocks can be accessed via port {{ .Values.sentinel.service.ports.kvrocks }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations
+
+For read/write operations, first access the Kvrocks Sentinel cluster, which is available in port {{ .Values.sentinel.service.ports.sentinel }} using the same domain name above.
+
+{{- else }}
+
+Kvrocks can be accessed on the following DNS names from within your cluster:
+
+    {{ printf "%s-master.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read/write operations (port {{ .Values.master.service.ports.kvrocks }})
+    {{ printf "%s-replicas.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read-only operations (port {{ .Values.replica.service.ports.kvrocks }})
+
+{{- end }}
+{{- else }}
+
+Kvrocks can be accessed via port {{ .Values.master.service.ports.kvrocks }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+{{- end }}
+
+{{ if .Values.auth.enabled }}
+
+To get your password run:
+
+    export KVROCKS_REQUIREPASS=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "kvrocks.secretName" . }} -o jsonpath="{.data.kvrocks-password}" | base64 --decode)
+
+{{- end }}
+
+To connect to your Kvrocks server:
+
+1. 
Run a Kvrocks pod that you can use as a client:
+
+   kubectl run --namespace {{ .Release.Namespace }} redis-client --restart='Never' {{ if .Values.auth.enabled }} --env KVROCKS_REQUIREPASS=$KVROCKS_REQUIREPASS {{ end }} --image {{ template "kvrocks.image" . }} --command -- sleep infinity
+
+   Use the following command to attach to the pod:
+
+   kubectl exec --tty -i redis-client \
+   {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "common.names.fullname" . }}-client=true" \{{- end }}
+   --namespace {{ .Release.Namespace }} -- bash
+
+2. Connect using the Kvrocks CLI:
+
+{{- if eq .Values.architecture "replication" }}
+   {{- if .Values.sentinel.enabled }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.kvrocks }} # Read only operations
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.sentinel }} # Sentinel access
+   {{- else }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h {{ printf "%s-master" (include "common.names.fullname" .) }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h {{ printf "%s-replicas" (include "common.names.fullname" .) }}
+   {{- end }}
+{{- else }}
+   {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h {{ template "common.names.fullname" . }}-master
+{{- end }}
+
+{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+
+Note: Since NetworkPolicy is enabled, only pods with label "{{ template "common.names.fullname" . }}-client=true" will be able to connect to kvrocks. 
+ +{{- else }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +{{- if contains "NodePort" .Values.sentinel.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT + +{{- else if contains "LoadBalancer" .Values.sentinel.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.sentinel.service.ports.kvrocks }} + +{{- else if contains "ClusterIP" .Values.sentinel.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "common.names.fullname" . 
}} {{ .Values.sentinel.service.ports.kvrocks }}:{{ .Values.sentinel.service.ports.kvrocks }} &
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.sentinel.service.ports.kvrocks }}
+
+{{- end }}
+{{- else }}
+{{- if contains "NodePort" .Values.master.service.type }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ printf "%s-master" (include "common.names.fullname" .) }})
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT
+
+{{- else if contains "LoadBalancer" .Values.master.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ printf "%s-master" (include "common.names.fullname" .) }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.master.service.ports.kvrocks }}
+
+{{- else if contains "ClusterIP" .Values.master.service.type }}
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-master" (include "common.names.fullname" .) }} {{ .Values.master.service.ports.kvrocks }}:{{ .Values.master.service.ports.kvrocks }} &
+    {{ if .Values.auth.enabled }}REDISCLI_AUTH="$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.master.service.ports.kvrocks }}
+
+{{- end }}
+{{- end }}
+
+{{- end }}
+{{- end }}
+{{- include "kvrocks.checkRollingTags" . 
}} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.sysctl.image }} + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Release.IsUpgrade ) }} +{{- if $.Values.sentinel.service.nodePorts.sentinel }} +No need to upgrade, ports and nodeports have been set from values +{{- else }} +#!#!#!#!#!#!#!# IMPORTANT #!#!#!#!#!#!#!# +YOU NEED TO PERFORM AN UPGRADE FOR THE SERVICES AND WORKLOAD TO BE CREATED +{{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/_helpers.tpl b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/_helpers.tpl new file mode 100644 index 00000000..c7e60381 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/_helpers.tpl @@ -0,0 +1,208 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper kvrocks image name +*/}} +{{- define "kvrocks.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper kvrocks Sentinel image name +*/}} +{{- define "kvrocks.sentinel.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sentinel.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper kvrocks Proxy image name +*/}} +{{- define "kvrocks.proxy.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.proxy.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "kvrocks.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "kvrocks.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} 
+{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "kvrocks.sysctl.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sysctl.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "kvrocks.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.sentinel.image .Values.metrics.image .Values.volumePermissions.image .Values.sysctl.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kvrocks.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the configuration configmap name +*/}} +{{- define "kvrocks.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kvrocks.createConfigmap" -}} +{{- if empty .Values.existingConfigmap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "kvrocks.secretName" -}} +{{- if .Values.auth.existingSecret -}} +{{- printf "%s" .Values.auth.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from kvrocks™ secret. +*/}} +{{- define "kvrocks.secretPasswordKey" -}} +{{- if and .Values.auth.existingSecret .Values.auth.existingSecretPasswordKey -}} +{{- printf "%s" .Values.auth.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "kvrocks-password" -}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return kvrocks™ password +*/}} +{{- define "kvrocks.password" -}} +{{- if not (empty .Values.global.kvrocks.password) }} + {{- .Values.global.kvrocks.password -}} +{{- else if not (empty .Values.auth.password) -}} + {{- .Values.auth.password -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) 
"Length" 10 "Key" "kvrocks-password") -}} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "kvrocks.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.sentinel.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- end -}} + +{{/* Validate values of kvrocks™ - spreadConstrainsts K8s version */}} +{{- define "kvrocks.validateValues.topologySpreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.replica.topologySpreadConstraints -}} +kvrocks: topologySpreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* Validate values of kvrocks™ - must provide a valid architecture */}} +{{- define "kvrocks.validateValues.architecture" -}} +{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replication") -}} +kvrocks: architecture + Invalid architecture selected. Valid values are "standalone" and + "replication". Please set a valid architecture (--set architecture="xxxx") +{{- end -}} +{{- if and .Values.sentinel.enabled (not (eq .Values.architecture "replication")) }} +kvrocks: architecture + Using kvrocks sentinel on standalone mode is not supported. 
+ To deploy kvrocks sentinel, please select the "replication" mode + (--set "architecture=replication,sentinel.enabled=true") +{{- end -}} +{{- end -}} + +{{/* Validate values of kvrocks™ - PodSecurityPolicy create */}} +{{- define "kvrocks.validateValues.podSecurityPolicy.create" -}} +{{- if and .Values.podSecurityPolicy.create (not .Values.podSecurityPolicy.enabled) }} +kvrocks: podSecurityPolicy.create + In order to create PodSecurityPolicy, you also need to enable + podSecurityPolicy.enabled field +{{- end -}} +{{- end -}} + +{{/* Define the suffix utilized for external-dns */}} +{{- define "kvrocks.externalDNS.suffix" -}} +{{ printf "%s.%s" (include "common.names.fullname" .) .Values.useExternalDNS.suffix }} +{{- end -}} + +{{/* Compile all annotations utilized for external-dns */}} +{{- define "kvrocks.externalDNS.annotations" -}} +{{- if .Values.useExternalDNS.enabled }} +{{ .Values.useExternalDNS.annotationKey }}hostname: {{ include "kvrocks.externalDNS.suffix" . }} +{{- range $key, $val := .Values.useExternalDNS.additionalAnnotations }} +{{ $.Values.useExternalDNS.annotationKey }}{{ $key }}: {{ $val | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/configmap.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/configmap.yaml new file mode 100644 index 00000000..5607837a --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/configmap.yaml @@ -0,0 +1,27 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + sentinel.conf: |- + dir "/tmp" + port {{ .Values.sentinel.containerPorts.sentinel }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "common.names.fullname" . }}-node-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.sentinel.service.ports.kvrocks }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} + # User-supplied sentinel configuration: + {{- if .Values.sentinel.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.configuration "context" $ ) | nindent 4 }} + {{- end }} + # End of sentinel configuration +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/extra-list.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/extra-list.yaml new file mode 100644 index 00000000..9ac65f9e --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/headless-svc.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/headless-svc.yaml new file mode 100644 index 00000000..6d30fd02 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/headless-svc.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- include "kvrocks.externalDNS.annotations" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: tcp-kvrocks + port: {{ if .Values.sentinel.enabled }}{{ .Values.sentinel.service.ports.kvrocks }}{{ else }}{{ .Values.master.service.ports.kvrocks }}{{ end }} + targetPort: kvrocks + {{- if .Values.sentinel.enabled }} + - name: tcp-sentinel + port: {{ .Values.sentinel.service.ports.sentinel }} + targetPort: sentinel + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/health-configmap.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/health-configmap.yaml new file mode 100644 index 00000000..257367ac --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/health-configmap.yaml @@ -0,0 +1,141 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-health" (include "common.names.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -n "$KVROCKS_REQUIREPASS" ]] && export REDISCLI_AUTH="$KVROCKS_REQUIREPASS" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $KVROCKS_PORT \ + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -n "$KVROCKS_REQUIREPASS" ]] && export REDISCLI_AUTH="$KVROCKS_REQUIREPASS" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $KVROCKS_PORT \ + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash + +{{- if .Values.auth.sentinel }} + [[ -n "$KVROCKS_REQUIREPASS" ]] && export REDISCLI_AUTH="$KVROCKS_REQUIREPASS" +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $SENTINEL_PORT \ + ping + ) + if [ "$?" 
-eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -n "$KVROCKS_MASTERAUTH" ]] && export REDISCLI_AUTH="$KVROCKS_MASTERAUTH" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $KVROCKS_MASTER_HOST \ + -p $KVROCKS_PORT \ + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -n "$KVROCKS_MASTERAUTH" ]] && export REDISCLI_AUTH="$KVROCKS_MASTERAUTH" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $KVROCKS_MASTER_HOST \ + -p $KVROCKS_PORT \ + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? 
+ exit $exit_status diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/application.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/application.yaml new file mode 100644 index 00000000..b44bf429 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/application.yaml @@ -0,0 +1,411 @@ +{{- if or (not (eq .Values.architecture "replication")) (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: {{ .Values.master.kind }} +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: master + {{- if (eq .Values.master.kind "StatefulSet") }} + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.master.updateStrategy }} + {{- if (eq .Values.master.kind "Deployment") }} + strategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- else }} + updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- end }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: master + {{- if .Values.master.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "kvrocks.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "kvrocks.imagePullSecrets" . | nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.podSecurityContext.enabled }} + securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kvrocks.serviceAccountName" . 
}} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName | quote }} + {{- end }} + {{- if .Values.master.dnsPolicy }} + dnsPolicy: {{ .Values.master.dnsPolicy }} + {{- end }} + {{- if .Values.master.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.master.dnsConfig "context" $) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }} + 
containers: + - name: kvrocks + image: {{ template "kvrocks.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.master.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.master.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.master.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/drycc/scripts/start-scripts/start-master.sh + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: KVROCKS_CLUSTER_ENABLED + value: "no" + {{- if .Values.auth.enabled }} + - name: KVROCKS_MASTERAUTH + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . 
}} + {{- end }} + - name: KVROCKS_PORT + value: {{ .Values.master.containerPorts.kvrocks | quote }} + - name: KVROCKS_BIND + value: "0.0.0.0" + - name: KVROCKS_DIR + value: "{{ .Values.master.persistence.path }}" + {{- if .Values.master.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: kvrocks + containerPort: {{ .Values.master.containerPorts.kvrocks }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kvrocks + {{- else if .Values.master.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/drycc/scripts/start-scripts + - name: health + mountPath: /health + - name: kvrocks-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: kvrocks-tmp-conf + mountPath: /opt/drycc/kvrocks/etc/ + - name: tmp + mountPath: /tmp + {{- if .Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ include "kvrocks.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + - -c + - | + kvrocks_exporter --kvrocks.addr=localhost:$(KVROCKS_PORT) {{- if .Values.auth.enabled }} --kvrocks.password $(KVROCKS_REQUIREPASS){{- end }} {{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + - name: KVROCKS_PORT + value: {{ .Values.master.containerPorts.kvrocks | quote }} + {{- if .Values.auth.enabled }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . 
}} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.master.podSecurityContext.enabled .Values.master.containerSecurityContext.enabled }} + {{- if or .Values.master.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.master.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "kvrocks.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: kvrocks-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "kvrocks.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) 
}} + defaultMode: 0755 + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: kvrocks-tmp-conf + {{- if .Values.master.persistence.medium }} + emptyDir: + medium: {{ .Values.master.persistence.medium | quote }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if .Values.master.persistence.medium }} + emptyDir: + medium: {{ .Values.master.persistence.medium | quote }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.master.persistence.enabled }} + - name: kvrocks-data + {{- if .Values.master.persistence.medium }} + emptyDir: { + medium: {{ .Values.master.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.master.persistence.existingClaim }} + - name: kvrocks-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.master.persistence.existingClaim .) }} + {{- else if (eq .Values.master.kind "Deployment") }} + - name: kvrocks-data + persistentVolumeClaim: + claimName: {{ printf "kvrocks-data-%s-master" (include "common.names.fullname" .) 
}} + {{- else }} + {{- if .Values.master.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.master.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.master.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: kvrocks-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/psp.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/psp.yaml new file mode 100644 index 00000000..2ba93b6e --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/psp.yaml @@ -0,0 +1,46 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.podSecurityPolicy.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.podSecurityContext.fsGroup }} + max: {{ .Values.master.podSecurityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/pvc.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/pvc.yaml new file mode 100644 index 00000000..15b88c1c --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/pvc.yaml @@ -0,0 +1,26 @@ +{{- if and (eq .Values.architecture "standalone") (eq .Values.master.kind "Deployment") (.Values.master.persistence.enabled) (not .Values.master.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ printf "kvrocks-data-%s-master" (include "common.names.fullname" .) }} + labels: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 4 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 4 }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/service.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/service.yaml new file mode 100644 index 00000000..ca0f4f37 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/master/service.yaml @@ -0,0 +1,49 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.master.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.master.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.master.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and (eq .Values.master.service.type "ClusterIP") .Values.master.service.clusterIP }} + clusterIP: {{ .Values.master.service.clusterIP }} + {{- end }} + ports: + - name: tcp-kvrocks + port: {{ .Values.master.service.ports.kvrocks }} + targetPort: kvrocks + {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) .Values.master.service.nodePorts.kvrocks}} + nodePort: {{ .Values.master.service.nodePorts.kvrocks}} + {{- else if eq .Values.master.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.master.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: 
{{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: master +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/metrics-svc.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/metrics-svc.yaml new file mode 100644 index 00000000..94459ec0 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/metrics-svc.yaml @@ -0,0 +1,38 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.port }} + protocol: TCP + 
targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/networkpolicy.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/networkpolicy.yaml new file mode 100644 index 00000000..e51701fe --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/networkpolicy.yaml @@ -0,0 +1,80 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + {{- if or (eq .Values.architecture "replication") .Values.networkPolicy.extraEgress }} + - Egress + egress: + - {} + {{- end }} + ingress: + {{- if or (eq .Values.sentinel.service.type "LoadBalancer") (eq .Values.master.service.type "LoadBalancer") (eq .Values.replica.service.type "LoadBalancer") }} + - {} + {{- else }} + # Allow inbound connections + - ports: + - port: {{ .Values.master.containerPorts.kvrocks }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.containerPorts.sentinel }} + {{- end }} + {{- if .Values.proxy.enabled }} + - port: {{ .Values.proxy.containerPorts.proxy }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 14 }} + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/pdb.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/pdb.yaml new file mode 100644 index 00000000..f82d278a --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/prometheusrule.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/prometheusrule.yaml new file mode 100644 index 00000000..cd8bc68b --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/prometheusrule.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "common.names.name" $ }} + rules: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/hpa.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/hpa.yaml new file mode 100644 index 00000000..468a504c --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/hpa.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.replica.autoscaling.enabled (not .Values.sentinel.enabled) }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-replicas" (include "common.names.fullname" .) 
}} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/service.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/service.yaml new file mode 100644 index 00000000..2fb92456 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/service.yaml @@ -0,0 +1,49 @@ +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.replica.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.replica.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.replica.service.type }} + {{- if eq .Values.replica.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.replica.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") .Values.replica.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.replica.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") .Values.replica.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.replica.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and (eq .Values.replica.service.type "ClusterIP") .Values.replica.service.clusterIP }} + clusterIP: {{ .Values.replica.service.clusterIP }} + {{- end }} + ports: + - name: tcp-kvrocks + port: {{ .Values.replica.service.ports.kvrocks }} + targetPort: kvrocks + {{- if and (or (eq .Values.replica.service.type "NodePort") (eq .Values.replica.service.type "LoadBalancer")) .Values.replica.service.nodePorts.kvrocks}} + nodePort: {{ .Values.replica.service.nodePorts.kvrocks}} + {{- else if eq .Values.replica.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.replica.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.extraPorts "context" $) | nindent 4 }} + 
{{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: replica +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/statefulset.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/statefulset.yaml new file mode 100644 index 00000000..990e8d26 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/replicas/statefulset.yaml @@ -0,0 +1,410 @@ +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.replica.autoscaling.enabled }} + replicas: {{ .Values.replica.replicaCount }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: replica + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: replica + {{- if .Values.replica.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "kvrocks.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "kvrocks.imagePullSecrets" . | nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kvrocks.serviceAccountName" . 
}} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "replica" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "replica" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + {{- if .Values.replica.dnsPolicy }} + dnsPolicy: {{ .Values.replica.dnsPolicy }} + {{- end }} + {{- if .Values.replica.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ 
.Values.replica.terminationGracePeriodSeconds }} + containers: + - name: kvrocks + image: {{ template "kvrocks.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/drycc/scripts/start-scripts/start-replica.sh + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: KVROCKS_CLUSTER_ENABLED + value: "no" + - name: KVROCKS_MASTER_HOST + value: {{ template "common.names.fullname" . }}-master-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: KVROCKS_MASTER_PORT_NUMBER + value: {{ .Values.master.containerPorts.kvrocks | quote }} + - name: KVROCKS_CFG_SLAVEOF + value: {{ template "common.names.fullname" . }}-master-0.{{ template "common.names.fullname" . 
}}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.master.containerPorts.kvrocks | quote }} + {{- if .Values.auth.enabled }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . }} + - name: KVROCKS_MASTERAUTH + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . }} + {{- end }} + - name: KVROCKS_PORT + value: {{ .Values.replica.containerPorts.kvrocks | quote }} + - name: KVROCKS_BIND + value: "0.0.0.0" + - name: KVROCKS_DIR + value: "{{ .Values.replica.persistence.path }}" + {{- if .Values.replica.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }} + envFrom: + {{- if .Values.replica.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.replica.extraEnvVarsCM }} + {{- end }} + {{- if .Values.replica.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.replica.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: kvrocks + containerPort: {{ .Values.replica.containerPorts.kvrocks }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kvrocks + {{- else if .Values.replica.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.replica.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 
.Values.replica.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- else if .Values.replica.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.replica.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.replica.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.replica.readinessProbe.timeoutSeconds }} + {{- else if .Values.replica.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.resources }} + resources: {{- toYaml .Values.replica.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/drycc/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.auth.usePasswordFiles }} + - name: kvrocks-password + mountPath: /opt/drycc/kvrocks/secrets/ + {{- end }} + - name: kvrocks-data + mountPath: /data + subPath: {{ .Values.replica.persistence.subPath }} + - name: kvrocks-tmp-conf + mountPath: /opt/drycc/kvrocks/etc + {{- if .Values.replica.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts 
"context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "kvrocks.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + - -c + - | + kvrocks_exporter --kvrocks.addr=localhost:$(KVROCKS_PORT) {{- if .Values.auth.enabled }} --kvrocks.password $(KVROCKS_REQUIREPASS){{- end }} {{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + - name: KVROCKS_PORT + value: {{ .Values.replica.containerPorts.kvrocks | quote }} + {{- if .Values.auth.enabled }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . 
}} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: kvrocks-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "kvrocks.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: kvrocks-data + mountPath: {{ .Values.replica.persistence.path }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "kvrocks.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) 
}} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: kvrocks-password + secret: + secretName: {{ template "kvrocks.secretName" . }} + items: + - key: {{ template "kvrocks.secretPasswordKey" . }} + path: kvrocks-password + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: kvrocks-tmp-conf + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: kvrocks-data + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + {{- if .Values.replica.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.replica.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.replica.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: kvrocks-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: replica + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.replica.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/role.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/role.yaml new file mode 100644 index 00000000..0475e0da --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/role.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.podSecurityPolicy.enabled }} + - apiGroups: + - '{{ template "podSecurityPolicy.apiGroup" . }}' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: [{{ template "common.names.fullname" . 
}}] + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/rolebinding.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/rolebinding.yaml new file mode 100644 index 00000000..74968b8f --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "common.names.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kvrocks.serviceAccountName" . }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/scripts-configmap.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/scripts-configmap.yaml new file mode 100644 index 00000000..2b44e8c7 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/scripts-configmap.yaml @@ -0,0 +1,431 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + + . /opt/drycc/scripts/libos.sh + . /opt/drycc/scripts/liblog.sh + . /opt/drycc/scripts/libvalidations.sh + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "KVROCKS") + echo {{ .Values.master.containerPorts.kvrocks }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + KVROCKSPORT=$(get_port "$HOSTNAME" "KVROCKS") + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + KVROCKS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . 
}}" "TCP_SENTINEL") + validate_quorum() { + + quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h $KVROCKS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel master {{ .Values.sentinel.masterSet }}" + + info "about to run the command: $quorum_info_command" + eval $quorum_info_command | grep -Fq "s_down" + } + + trigger_manual_failover() { + + failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h $KVROCKS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel failover {{ .Values.sentinel.masterSet }}" + + info "about to run the command: $failover_command" + eval $failover_command + } + + get_sentinel_master_info() { + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$KVROCKS_REQUIREPASS" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $KVROCKS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + + info "about to run the command: $sentinel_info_command" + eval $sentinel_info_command + } + + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }} + useradd kvrocks + chown -R kvrocks {{ .Values.replica.persistence.path }} + {{- end }} + + # check if there is a master + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + master_port_in_persisted_conf="$KVROCKS_PORT" + master_in_sentinel="$(get_sentinel_master_info)" + kvrocksRetVal=$? 
+ + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/drycc/redis-sentinel/etc/sentinel.conf ]]; then + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/drycc/redis-sentinel/etc/sentinel.conf)" + master_port_in_persisted_conf="$(awk '/monitor/ {print $5}' /opt/drycc/redis-sentinel/etc/sentinel.conf)" + info "Found previous master ${master_in_persisted_conf}:${master_port_in_persisted_conf} in /opt/drycc/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/drycc/redis-sentinel/etc/sentinel.conf | grep monitor)" + touch /opt/drycc/redis-sentinel/etc/.node_read + fi + {{- end }} + + if [[ $kvrocksRetVal -ne 0 ]]; then + if [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 1: No active sentinel and in previous sentinel.conf we were the master --> MASTER + info "Configuring the node as master" + export KVROCKS_REPLICATION_MODE="master" + else + # Case 2: No active sentinel and in previous sentinel.conf we were not master --> REPLICA + info "Configuring the node as replica" + export KVROCKS_REPLICATION_MODE="slave" + fi + else + # Fetches current master's host and port + SENTINEL_INFO=($(get_sentinel_master_info)) + info "Current master: SENTINEL_INFO=(${SENTINEL_INFO[0]},${SENTINEL_INFO[1]})" + KVROCKS_MASTER_HOST=${SENTINEL_INFO[0]} + KVROCKS_MASTER_PORT_NUMBER=${SENTINEL_INFO[1]} + + if [[ "$KVROCKS_MASTER_HOST" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 3: Active sentinel and master it is this node --> MASTER + info "Configuring the node as master" + export KVROCKS_REPLICATION_MODE="master" + else + # Case 4: Active sentinel and master is not this node --> REPLICA + info "Configuring the node as replica" + export KVROCKS_REPLICATION_MODE="slave" + {{- if and .Values.sentinel.automateClusterRecovery (le (int .Values.sentinel.downAfterMilliseconds) 2000) }} + retry_count=1 + while validate_quorum + do + info "sleeping, waiting for Kvrocks master to come up" + sleep 1s + if ! 
((retry_count % 11)); then + info "Trying to manually failover" + failover_result=$(trigger_manual_failover) + + debug "Failover result: $failover_result" + fi + + ((retry_count+=1)) + done + info "Kvrocks master is up now" + {{- end }} + fi + fi + + export KVROCKS_CFG_REPLICA__ANNOUNCE__PORT="$KVROCKSPORT" + export KVROCKS_CFG_REPLICA__ANNOUNCE__IP=$(get_full_hostname "$HOSTNAME") + #export KVROCKS_BIND="localhost $(get_full_hostname "$HOSTNAME")" + + if [[ "$KVROCKS_REPLICATION_MODE" = "slave" ]]; then + export KVROCKS_CFG_SLAVEOF="${KVROCKS_MASTER_HOST} ${KVROCKS_MASTER_PORT_NUMBER}" + fi + + exec init-stack /opt/drycc/scripts/kvrocks/entrypoint.sh /opt/drycc/scripts/kvrocks/run.sh + + start-sentinel.sh: | + #!/bin/bash + + . /opt/drycc/scripts/libos.sh + . /opt/drycc/scripts/libvalidations.sh + . /opt/drycc/scripts/libfile.sh + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + KVROCKS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "KVROCKS") + echo {{ .Values.master.containerPorts.kvrocks }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + SERVPORT=$(get_port "$HOSTNAME" "SENTINEL") + KVROCKSPORT=$(get_port "$HOSTNAME" "KVROCKS") + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . 
}}" "TCP_SENTINEL") + + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/drycc/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/drycc/redis-sentinel/etc/sentinel.conf" + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + get_sentinel_master_info() { + + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$KVROCKS_REQUIREPASS" {{ end }}redis-cli -h $KVROCKS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + + info "about to run the command: $sentinel_info_command" + eval $sentinel_info_command + } + + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/drycc/redis-sentinel/etc/sentinel.conf ]]; then + check_lock_file() { + [[ -f /opt/drycc/redis-sentinel/etc/.node_read ]] + } + retry_while "check_lock_file" + rm -f /opt/drycc/redis-sentinel/etc/.node_read + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/drycc/redis-sentinel/etc/sentinel.conf)" + info "Found previous master $master_in_persisted_conf in /opt/drycc/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/drycc/redis-sentinel/etc/sentinel.conf | grep monitor)" + fi + {{- end }} + if ! 
get_sentinel_master_info && [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # No master found, lets create a master node + export KVROCKS_REPLICATION_MODE="master" + + KVROCKS_MASTER_HOST=$(get_full_hostname "$HOSTNAME") + KVROCKS_MASTER_PORT_NUMBER="$KVROCKSPORT" + else + export KVROCKS_REPLICATION_MODE="slave" + + # Fetches current master's host and port + SENTINEL_INFO=($(get_sentinel_master_info)) + info "printing SENTINEL_INFO=(${SENTINEL_INFO[0]},${SENTINEL_INFO[1]})" + KVROCKS_MASTER_HOST=${SENTINEL_INFO[0]} + KVROCKS_MASTER_PORT_NUMBER=${SENTINEL_INFO[1]} + fi + + cp /opt/drycc/redis-sentinel/mounted-etc/sentinel.conf /opt/drycc/redis-sentinel/etc/sentinel.conf + {{- if .Values.auth.enabled }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$KVROCKS_REQUIREPASS" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + {{- if and .Values.auth.enabled .Values.auth.sentinel }} + printf "\nrequirepass %s" "$KVROCKS_REQUIREPASS" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$KVROCKS_MASTER_HOST" "$KVROCKS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_known_sentinel() { + hostname="$1" + ip="$2" + + if [[ -n "$hostname" && -n "$ip" && "$hostname" != "$HOSTNAME" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "SENTINEL") $(host_id "$hostname")" + fi + } + add_known_replica() { + hostname="$1" + ip="$2" + + if [[ -n "$ip" && "$(get_full_hostname "$hostname")" != "$KVROCKS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "KVROCKS")" + fi + } + + # Add available hosts on the network as known replicas & 
sentinels + for node in $(seq 0 $(({{ .Values.replica.replicaCount }}-1))); do + hostname="{{ template "common.names.fullname" . }}-node-$node" + ip="$(getent hosts "$hostname.$HEADLESS_SERVICE" | awk '{ print $1 }')" + add_known_sentinel "$hostname" "$ip" + add_known_replica "$hostname" "$ip" + done + + echo "" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + echo "sentinel announce-hostnames yes" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + echo "sentinel resolve-hostnames yes" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + echo "sentinel announce-port $SERVPORT" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + echo "sentinel announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/drycc/redis-sentinel/etc/sentinel.conf + + exec redis-server /opt/drycc/redis-sentinel/etc/sentinel.conf --sentinel + prestop-sentinel.sh: | + #!/bin/bash + + . /opt/drycc/scripts/libvalidations.sh + . /opt/drycc/scripts/libos.sh + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + SENTINEL_SERVICE_ENV_NAME={{ printf "%s%s" (upper (include "common.names.fullname" .)| replace "-" "_") "_SERVICE_PORT_TCP_SENTINEL" }} + SENTINEL_SERVICE_PORT=${!SENTINEL_SERVICE_ENV_NAME} + + get_full_hostname() { + hostname="$1" + + {{- if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + run_sentinel_command() { + redis-cli -h "$KVROCKS_SERVICE" -p "$SENTINEL_SERVICE_PORT" sentinel "$@" + } + failover_finished() { + SENTINEL_INFO=($(run_sentinel_command get-master-addr-by-name "{{ .Values.sentinel.masterSet }}")) + KVROCKS_MASTER_HOST="${SENTINEL_INFO[0]}" + [[ "$KVROCKS_MASTER_HOST" != "$(get_full_hostname $HOSTNAME)" ]] + } + + KVROCKS_SERVICE="{{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable + [[ -n "$KVROCKS_REQUIREPASS" ]] && export REDISCLI_AUTH="$KVROCKS_REQUIREPASS" + + if ! failover_finished; then + echo "I am the master pod and you are stopping me. Starting sentinel failover" + # if I am the master, issue a command to failover once and then wait for the failover to finish + run_sentinel_command failover "{{ .Values.sentinel.masterSet }}" + if retry_while "failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1; then + echo "Master has been successfuly failed over to a different pod." + exit 0 + else + echo "Master failover failed" + exit 1 + fi + else + exit 0 + fi + prestop-redis.sh: | + #!/bin/bash + + . /opt/drycc/scripts/libvalidations.sh + . /opt/drycc/scripts/libos.sh + + run_redis_command() { + redis-cli -h 127.0.0.1 -p ${KVROCKS_PORT} "$@" + } + failover_finished() { + REDIS_ROLE=$(run_redis_command role | head -1) + [[ "$REDIS_ROLE" != "master" ]] + } + + # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable + [[ -n "$KVROCKS_REQUIREPASS" ]] && export REDISCLI_AUTH="$KVROCKS_REQUIREPASS" + + if ! 
failover_finished; then + echo "Waiting for sentinel to run failover for up to {{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}s" + retry_while "failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1 + else + exit 0 + fi + +{{- else }} + start-master.sh: | + #!/bin/bash + + {{- if and .Values.master.containerSecurityContext.runAsUser (eq (.Values.master.containerSecurityContext.runAsUser | int) 0) }} + useradd kvrocks + chown -R kvrocks {{ .Values.master.persistence.path }} + {{- end }} + #export KVROCKS_BIND="localhost $(get_full_hostname "$HOSTNAME")" + exec init-stack /opt/drycc/scripts/kvrocks/entrypoint.sh /opt/drycc/scripts/kvrocks/run.sh + {{- if eq .Values.architecture "replication" }} + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "KVROCKS") + echo {{ .Values.master.containerPorts.kvrocks }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + KVROCKSPORT=$(get_port "$HOSTNAME" "KVROCKS") + + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }} + useradd kvrocks + chown -R kvrocks {{ .Values.replica.persistence.path }} + {{- end }} + export KVROCKS_CFG_REPLICA__ANNOUNCE__PORT="$KVROCKSPORT" + export KVROCKS_CFG_REPLICA__ANNOUNCE__IP=$(get_full_hostname "$HOSTNAME") + export KVROCKS_CFG_SLAVEOF="${KVROCKS_MASTER_HOST} ${KVROCKS_MASTER_PORT_NUMBER}" + #export KVROCKS_BIND="localhost $(get_full_hostname "$HOSTNAME")" + exec init-stack /opt/drycc/scripts/kvrocks/entrypoint.sh 
/opt/drycc/scripts/kvrocks/run.sh + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/secret.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/secret.yaml new file mode 100644 index 00000000..3ff2cf14 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/secret.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.secretAnnotations .Values.commonAnnotations }} + annotations: + {{- if .Values.secretAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secretAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +type: Opaque +data: + kvrocks-password: {{ include "kvrocks.password" . | b64enc | quote }} +{{- end -}} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/hpa.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/hpa.yaml new file mode 100644 index 00000000..51f0f80a --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/hpa.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.replica.autoscaling.enabled .Values.sentinel.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/node-services.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/node-services.yaml new file mode 100644 index 00000000..9ed339ba --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/node-services.yaml @@ -0,0 +1,88 @@ +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (or .Release.IsUpgrade .Values.sentinel.service.nodePorts.kvrocks ) }} + +{{- range $i := until (int .Values.replica.replicaCount) }} + +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" $ ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $kvrocksport := 0}} +{{ $proxyport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) 
(toString $i) "sentinel") }} +{{ $kvrocksport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "kvrocks") }} +{{ $proxyport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "proxy") }} +{{- else }} +{{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" $ }}-node-{{ $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: node + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $.Values.sentinel.service.annotations $.Values.commonAnnotations }} + annotations: + {{- if $.Values.sentinel.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.sentinel.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: NodePort + ports: + - name: sentinel + {{- if $.Values.sentinel.service.nodePorts.sentinel }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + {{- else }} + nodePort: {{ $sentinelport }} + port: {{ $sentinelport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: proxy + {{- if $.Values.sentinel.service.nodePorts.proxy }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.proxy $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.proxy $i 1) }} + {{- else }} + nodePort: {{ $proxyport }} + port: {{ $proxyport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.proxy.containerPorts.proxy }} + - name: kvrocks + {{- if 
$.Values.sentinel.service.nodePorts.kvrocks }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.kvrocks $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.kvrocks $i 1) }} + {{- else }} + nodePort: {{ $kvrocksport }} + port: {{ $kvrocksport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.replica.containerPorts.kvrocks }} + - name: sentinel-internal + nodePort: null + port: {{ $.Values.sentinel.containerPorts.sentinel }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: proxy-internal + nodePort: null + port: {{ $.Values.proxy.containerPorts.proxy }} + protocol: TCP + targetPort: {{ $.Values.proxy.containerPorts.proxy }} + - name: kvrocks-internal + nodePort: null + port: {{ $.Values.replica.containerPorts.kvrocks }} + protocol: TCP + targetPort: {{ $.Values.replica.containerPorts.kvrocks }} + selector: + statefulset.kubernetes.io/pod-name: {{ template "common.names.fullname" $ }}-node-{{ $i }} +{{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/ports-configmap.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/ports-configmap.yaml new file mode 100644 index 00000000..2434cf5d --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/ports-configmap.yaml @@ -0,0 +1,100 @@ +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Values.sentinel.service.nodePorts.kvrocks ) }} +{{- /* create a list to keep track of ports we choose to use */}} +{{ $chosenports := (list ) }} + +{{- /* Get list of all used nodeports */}} +{{ $usedports := (list ) }} +{{- range $index, $service := (lookup "v1" "Service" "" "").items }} + {{- range.spec.ports }} + {{- if .nodePort }} + {{- $usedports = (append $usedports .nodePort) }} + {{- end }} + {{- end }} +{{- end }} + +{{- /* +comments that start with # are rendered in the output when you debug, so you can less and search 
for them +Vars in the comment will be rendered out, so you can check their value this way. +https://helm.sh/docs/chart_best_practices/templates/#comments-yaml-comments-vs-template-comments + +remove the template comments and leave the yaml comments to help debug +*/}} + +{{- /* Sort the list */}} +{{ $usedports = $usedports | sortAlpha }} +#usedports {{ $usedports }} + +{{- /* How many nodeports per service do we want to create, except for the main service which is always two */}} +{{ $numberofPortsPerNodeService := 2 }} + +{{- /* for every nodeport we want, loop though the used ports to get an unused port */}} +{{- range $j := until (int (add (mul (int .Values.replica.replicaCount) $numberofPortsPerNodeService) 2)) }} + {{- /* #j={{ $j }} */}} + {{- $nodeport := (add $j 30000) }} + {{- $nodeportfound := false }} + {{- range $i := $usedports }} + {{- /* #i={{ $i }} + #nodeport={{ $nodeport }} + #usedports={{ $usedports }} */}} + {{- if and (has (toString $nodeport) $usedports) (eq $nodeportfound false) }} + {{- /* nodeport conflicts with in use */}} + {{- $nodeport = (add $nodeport 1) }} + {{- else if and ( has $nodeport $chosenports) (eq $nodeportfound false) }} + {{- /* nodeport already chosen, try another */}} + {{- $nodeport = (add $nodeport 1) }} + {{- else if (eq $nodeportfound false) }} + {{- /* nodeport free to use: not already claimed and not in use */}} + {{- /* select nodeport, and place into usedports */}} + {{- $chosenports = (append $chosenports $nodeport) }} + {{- $nodeportfound = true }} + {{- else }} + {{- /* nodeport has already been chosen and locked in, just work through the rest of the list to get to the next nodeport selection */}} + {{- end }} + {{- end }} + {{- if (eq $nodeportfound false) }} + {{- $chosenports = (append $chosenports $nodeport) }} + {{- end }} + +{{- end }} + +{{- /* print the usedports and chosenports for debugging */}} +#usedports {{ $usedports }} +#chosenports {{ $chosenports }}}} + +--- +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: {{ template "common.names.fullname" . }}-ports-configmap + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }} +{{- if $portsmap }} +{{- /* configmap already exists, do not install again */ -}} + {{- range $name, $value := $portsmap }} + "{{ $name }}": "{{ $value }}" + {{- end }} +{{- else }} +{{- /* configmap being set for first time */ -}} + {{- range $index, $port := $chosenports }} + {{- $nodenumber := (floor (div $index 2)) }} + {{- if (eq $index 0) }} + "{{ template "common.names.fullname" $ }}-sentinel": "{{ $port }}" + {{- else if (eq $index 1) }} + "{{ template "common.names.fullname" $ }}-kvrocks": "{{ $port }}" + {{- else if (eq (mod $index 2) 0) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-sentinel": "{{ $port }}" + {{- else if (eq (mod $index 2) 1) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-kvrocks": "{{ $port }}" + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/service.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/service.yaml new file mode 100644 index 00000000..10316d26 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/service.yaml @@ -0,0 +1,114 @@ +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.kvrocks -}} + +--- +{{- if and (eq .Values.architecture 
"replication") .Values.sentinel.enabled }} +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $proxyport := 0}} +{{ $kvrocksport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "sentinel") }} +{{ $proxyport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "proxy") }} +{{ $kvrocksport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "kvrocks") }} +{{- else }} +{{- end }} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.sentinel.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.sentinel.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{- if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.sentinel.service.type "LoadBalancer") .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.sentinel.service.type "LoadBalancer") .Values.sentinel.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml 
.Values.sentinel.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and (eq .Values.sentinel.service.type "ClusterIP") .Values.sentinel.service.clusterIP }} + clusterIP: {{ .Values.sentinel.service.clusterIP }} + {{- end }} + ports: + - name: tcp-kvrocks + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.kvrocks }} + port: {{ .Values.sentinel.service.nodePorts.kvrocks }} + {{- else if eq .Values.sentinel.service.type "NodePort" }} + port: {{ $kvrocksport }} + {{- else}} + port: {{ .Values.sentinel.service.ports.kvrocks }} + {{- end }} + targetPort: {{ .Values.replica.containerPorts.kvrocks }} + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.kvrocks }} + nodePort: {{ .Values.sentinel.service.nodePorts.kvrocks }} + {{- else if eq .Values.sentinel.service.type "ClusterIP" }} + nodePort: null + {{- else if eq .Values.sentinel.service.type "NodePort" }} + nodePort: {{ $kvrocksport }} + {{- end }} + - name: tcp-sentinel + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }} + port: {{ .Values.sentinel.service.nodePorts.sentinel }} + {{- else if eq .Values.sentinel.service.type "NodePort" }} + port: {{ $sentinelport }} + {{- else }} + port: {{ .Values.sentinel.service.ports.sentinel }} + {{- end }} + targetPort: {{ .Values.sentinel.containerPorts.sentinel }} + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }} + nodePort: {{ .Values.sentinel.service.nodePorts.sentinel }} + {{- else if eq .Values.sentinel.service.type "ClusterIP" }} + nodePort: null + {{- else if eq .Values.sentinel.service.type "NodePort" }} + nodePort: {{ $sentinelport }} + {{- end 
}} + - name: tcp-proxy + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.proxy }} + port: {{ .Values.sentinel.service.nodePorts.proxy }} + {{- else if eq .Values.sentinel.service.type "NodePort" }} + port: {{ $proxyport }} + {{- else }} + port: {{ .Values.sentinel.service.ports.proxy }} + {{- end }} + targetPort: {{ .Values.proxy.containerPorts.proxy }} + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.proxy }} + nodePort: {{ .Values.sentinel.service.nodePorts.proxy }} + {{- else if eq .Values.sentinel.service.type "ClusterIP" }} + nodePort: null + {{- else if eq .Values.sentinel.service.type "NodePort" }} + nodePort: {{ $proxyport }} + {{- end }} + {{- if eq .Values.sentinel.service.type "NodePort" }} + - name: sentinel-internal + nodePort: null + port: {{ .Values.sentinel.containerPorts.sentinel }} + protocol: TCP + targetPort: {{ .Values.sentinel.containerPorts.sentinel }} + - name: kvrocks-internal + nodePort: null + port: {{ .Values.replica.containerPorts.kvrocks }} + protocol: TCP + targetPort: {{ .Values.replica.containerPorts.kvrocks }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: node +{{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/statefulset.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/statefulset.yaml new file mode 100644 index 00000000..4fa17bea --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/sentinel/statefulset.yaml @@ -0,0 +1,654 @@ +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.kvrocks -}} +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . 
}} +kind: StatefulSet +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replica.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: node + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: node + {{- if .Values.replica.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "kvrocks.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . 
| sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "kvrocks.imagePullSecrets" . | nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kvrocks.serviceAccountName" . }} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "node" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "node" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if 
.Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + {{- if .Values.replica.dnsPolicy }} + dnsPolicy: {{ .Values.replica.dnsPolicy }} + {{- end }} + {{- if .Values.replica.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.sentinel.terminationGracePeriodSeconds }} + containers: + - name: kvrocks + image: {{ template "kvrocks.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/drycc/scripts/start-scripts/start-node.sh + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: KVROCKS_CLUSTER_ENABLED + value: "no" + - name: KVROCKS_PORT + value: {{ .Values.replica.containerPorts.kvrocks | quote }} + - name: KVROCKS_BIND + value: "0.0.0.0" + {{- if .Values.auth.enabled }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . }} + - name: KVROCKS_MASTERAUTH + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . 
}} + {{- end }} + - name: KVROCKS_DIR + value: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }} + envFrom: + {{- if .Values.replica.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.replica.extraEnvVarsCM }} + {{- end }} + {{- if .Values.replica.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.replica.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: kvrocks + containerPort: {{ .Values.replica.containerPorts.kvrocks }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kvrocks + {{- else if .Values.replica.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.replica.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.replica.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- else if .Values.replica.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.replica.readinessProbe.enabled }} + readinessProbe: + 
initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.replica.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- else if .Values.replica.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.resources }} + resources: {{- toYaml .Values.replica.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/drycc/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.sentinel.persistence.enabled }} + - name: sentinel-data + mountPath: /opt/drycc/redis-sentinel/etc + {{- end }} + - name: kvrocks-data + mountPath: {{ .Values.replica.persistence.path }} + subPath: {{ .Values.replica.persistence.subPath }} + - name: config + mountPath: /opt/drycc/kvrocks/mounted-etc + - name: kvrocks-tmp-conf + mountPath: /opt/drycc/kvrocks/etc + - name: tmp + mountPath: /tmp + {{- if .Values.replica.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - init-stack + - /bin/bash + - -c + - /opt/drycc/scripts/start-scripts/prestop-kvrocks.sh + - name: sentinel + image: {{ template "kvrocks.sentinel.image" . 
}} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.sentinel.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.sentinel.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.sentinel.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.sentinel.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/drycc/scripts/start-scripts/start-sentinel.sh + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.sentinel.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.auth.enabled }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . }} + - name: KVROCKS_MASTERAUTH + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . 
}} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: SENTINEL_TLS_ENABLED + value: "no" + - name: SENTINEL_PORT + value: {{ .Values.sentinel.containerPorts.sentinel | quote }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: sentinel + containerPort: {{ .Values.sentinel.containerPorts.sentinel }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.sentinel.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: sentinel + {{- else if .Values.sentinel.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" 
.Values.sentinel.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - init-stack + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + lifecycle: + preStop: + exec: + command: + - init-stack + - /bin/bash + - -c + - /opt/drycc/scripts/start-scripts/prestop-sentinel.sh + {{- end }} + {{- if .Values.sentinel.resources }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/drycc/scripts/start-scripts + - name: health + mountPath: /health + - name: sentinel-data + mountPath: /opt/drycc/redis-sentinel/etc + - name: kvrocks-data + mountPath: {{ .Values.replica.persistence.path }} + subPath: {{ .Values.replica.persistence.subPath }} + - name: config + mountPath: /opt/drycc/redis-sentinel/mounted-etc + {{- if .Values.sentinel.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.proxy.enabled }} + - name: proxy + image: {{ template "kvrocks.proxy.image" . 
}} + imagePullPolicy: {{ .Values.proxy.image.pullPolicy | quote }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + args: + - bash + - -ec + - | + /opt/drycc/redis-sentinel/bin/redis-sentinel-proxy \ + -listen :{{ .Values.proxy.containerPorts.proxy }} \ + -master {{ .Values.sentinel.masterSet }} \ + -sentinel-addr ${POD_IP}:{{ .Values.sentinel.containerPorts.sentinel }} \ + -sentinel-pass $(KVROCKS_REQUIREPASS) \ + -sentinel-user "" \ + -max-procs={{ .Values.proxy.maxProcs }} + {{- end }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.auth.enabled }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . }} + - name: KVROCKS_MASTERAUTH + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . 
}} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + ports: + - name: sentinel-proxy + containerPort: {{ .Values.proxy.containerPorts.proxy }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.proxy.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.sentinel.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: sentinel-proxy + {{- end }} + {{- if .Values.proxy.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.proxy.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.proxy.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.proxy.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.proxy.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.proxy.livenessProbe.failureThreshold }} + tcpSocket: + port: sentinel-proxy + {{- end }} + {{- if .Values.proxy.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.proxy.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.proxy.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.proxy.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.proxy.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.proxy.readinessProbe.failureThreshold }} + tcpSocket: + port: sentinel-proxy + {{- end }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{- toYaml .Values.proxy.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "kvrocks.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + - -c + - | + kvrocks_exporter --kvrocks.addr=localhost:$(KVROCKS_PORT) {{- if .Values.auth.enabled }} --kvrocks.password $(KVROCKS_REQUIREPASS){{- end }} {{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + - name: KVROCKS_PORT + value: {{ .Values.replica.containerPorts.kvrocks | quote }} + {{- if .Values.auth.enabled }} + - name: KVROCKS_REQUIREPASS + valueFrom: + secretKeyRef: + name: {{ template "kvrocks.secretName" . }} + key: {{ template "kvrocks.secretPasswordKey" . 
}} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "kvrocks.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: kvrocks-data + mountPath: {{ 
.Values.replica.persistence.path }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "kvrocks.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: config + configMap: + name: {{ include "kvrocks.configmapName" . 
}} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + {{- if not .Values.sentinel.persistence.enabled }} + - name: sentinel-data + {{- if .Values.sentinel.persistence.medium }} + emptyDir: { + medium: {{ .Values.sentinel.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + - name: kvrocks-tmp-conf + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.sentinel.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: kvrocks-data + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + {{- if .Values.sentinel.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.sentinel.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: kvrocks-data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.sentinel.persistence.enabled }} + - metadata: + name: sentinel-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.sentinel.persistence.annotations }} + annotations: {{- toYaml .Values.sentinel.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.sentinel.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.sentinel.persistence.size | quote }} + {{- if .Values.sentinel.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.sentinel.persistence "global" .Values.global) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/serviceaccount.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/serviceaccount.yaml new file mode 100644 index 00000000..45c43411 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "kvrocks.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + annotations: + {{- if or .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/servicemonitor.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/servicemonitor.yaml new file mode 100644 index 00000000..0d94a4b4 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/templates/servicemonitor.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/values.schema.json b/addons/kvrocks/2.10/chart/kvrocks-2.10/values.schema.json new file mode 100644 index 00000000..ba917820 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/values.schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "kvrocks architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`", + "enum": ["standalone", "replication"] + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Use password authentication" + }, + "password": { + "type": "string", + "title": "kvrocks password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "auth/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "kind": { + "type": "string", + "title": "Workload Kind", + "form": true, + "description": "Allowed values: `Deployment` or `StatefulSet`", + "enum": ["Deployment", "StatefulSet"] + }, + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + } + } + } + } + }, + "replica": { + "type": "object", + "title": "kvrocks replicas settings", + "form": 
true, + "hidden": { + "value": "standalone", + "path": "architecture" + }, + "properties": { + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of kvrocks replicas" + }, + "persistence": { + "type": "object", + "title": "Persistence for kvrocks replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "replica/persistence/enabled" + } + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/addons/kvrocks/2.10/chart/kvrocks-2.10/values.yaml b/addons/kvrocks/2.10/chart/kvrocks-2.10/values.yaml new file mode 100644 index 00000000..17b36c71 --- /dev/null +++ b/addons/kvrocks/2.10/chart/kvrocks-2.10/values.yaml @@ -0,0 +1,1668 @@ +## @section Global parameters +## Global Docker image parameters 
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.kvrocks.password Global kvrocks; password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + kvrocks: + password: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param secretAnnotations Annotations to add to secret +## +secretAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section kvrocks; Image parameters +## + +## Drycc kvrocks; image +## ref: 
https://hub.docker.com/r/drycc-addons/kvrocks/tags/ +## @param image.registry kvrocks; image registry +## @param image.repository kvrocks; image repository +## @param image.tag kvrocks; image tag (immutable tags are recommended) +## @param image.pullPolicy kvrocks; image pull policy +## @param image.pullSecrets kvrocks; image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: registry.drycc.cc + repository: drycc-addons/kvrocks + tag: "2.10" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## @section kvrocks; common configuration parameters +## https://github.com/drycc-addons/containers/tree/main/containers/kvrocks +## + +## @param architecture kvrocks; architecture. 
Allowed values: `standalone` or `replication` +## +architecture: replication +## kvrocks; Authentication parameters +## ref: https://github.com/bitnami/bitnami-docker-kvrocks#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: true + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password kvrocks; password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with kvrocks; credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://github.com/apache/kvrocks/blob/unstable/kvrocks.conf +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for kvrocks; nodes +## +existingConfigmap: "" + +## @section kvrocks; master configuration parameters +## + +master: + ## @param master.configuration Configuration for kvrocks; master nodes + ## ref: https://kvrocks.io/topics/config + ## + configuration: "" + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.preExecCmds Additional commands to run prior to starting kvrocks; master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for kvrocks; master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy 
volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to kvrocks; master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for kvrocks; master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for kvrocks; master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.kvrocks Container port to open on kvrocks; master nodes + ## + containerPorts: + kvrocks: 6379 + ## Configure extra options for kvrocks; containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable startupProbe on kvrocks; master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on kvrocks; master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param 
master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on kvrocks; master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## kvrocks; master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the kvrocks; master containers + ## @param master.resources.requests The requested resources for the kvrocks; master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled kvrocks; master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set kvrocks; master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: 
true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled kvrocks; master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set kvrocks; master containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for kvrocks; master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type kvrocks; master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param master.priorityClassName kvrocks; master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases kvrocks; master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for kvrocks; master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for kvrocks; master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in kvrocks; master pods + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for kvrocks; master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for kvrocks; master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for kvrocks; master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for kvrocks; master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param master.dnsPolicy DNS Policy for kvrocks; master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + dnsPolicy: "" + ## @param master.dnsConfig DNS Configuration for kvrocks; master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + dnsConfig: {} + ## @param master.lifecycleHooks for the kvrocks; master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the kvrocks; master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the kvrocks; master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the kvrocks; master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the kvrocks; master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on kvrocks; master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
+ ## + sizeLimit: "" + ## @param master.persistence.path The path the volume will be mounted at on kvrocks; master containers + ## NOTE: Useful when using different kvrocks; images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on kvrocks; master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted 
+ ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## kvrocks; master service parameters + ## + service: + ## @param master.service.type kvrocks; master service type + ## + type: ClusterIP + ## @param master.service.ports.kvrocks kvrocks; master service port + ## + ports: + kvrocks: 6379 + ## @param master.service.nodePorts.kvrocks Node port for kvrocks; master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + kvrocks: "" + ## @param master.service.externalTrafficPolicy kvrocks; master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.clusterIP kvrocks; master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP kvrocks; master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges kvrocks; master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g.
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.annotations Additional custom annotations for kvrocks; master service + ## + annotations: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the kvrocks-master pods + ## + terminationGracePeriodSeconds: 30 + +## @section kvrocks; replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of kvrocks; replicas to deploy + ## + replicaCount: 3 + ## @param replica.configuration Configuration for kvrocks; replicas nodes + ## ref: https://github.com/apache/kvrocks/blob/unstable/kvrocks.conf + ## + configuration: "" + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.preExecCmds Additional commands to run prior to starting kvrocks; replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for kvrocks; replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to kvrocks; replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for kvrocks; replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for kvrocks; replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for kvrocks service external master 
host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.kvrocks Container port to open on kvrocks; replicas nodes + ## + containerPorts: + kvrocks: 6379 + ## Configure extra options for kvrocks; containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on kvrocks; replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param replica.livenessProbe.enabled Enable livenessProbe on kvrocks; replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on kvrocks; replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for 
readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## kvrocks; replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the kvrocks; replicas containers + ## @param replica.resources.requests The requested resources for the kvrocks; replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.podSecurityContext.enabled Enabled kvrocks; replicas pods' Security Context + ## @param replica.podSecurityContext.fsGroup Set kvrocks; replicas pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.containerSecurityContext.enabled Enabled kvrocks; replicas containers' Security Context + ## @param replica.containerSecurityContext.runAsUser Set kvrocks; replicas containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param replica.schedulerName Alternate scheduler for kvrocks; replicas pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param replica.updateStrategy.type kvrocks; replicas statefulset strategy type + ## @skip replica.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param replica.priorityClassName kvrocks; replicas pods' priorityClassName + ## + priorityClassName: "" + ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## @param replica.hostAliases kvrocks; replicas pods host aliases + ## 
https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param replica.podLabels Extra labels for kvrocks; replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param replica.podAnnotations Annotations for kvrocks; replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in kvrocks; replicas pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for kvrocks; replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for kvrocks; replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for kvrocks; replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for kvrocks; replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param replica.dnsPolicy DNS Policy for kvrocks; replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + dnsPolicy: "" + ## @param replica.dnsConfig DNS Configuration for kvrocks; replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + dnsConfig: {} + ## @param replica.lifecycleHooks for the kvrocks; replica container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the kvrocks; replicas pod(s) + ## + extraVolumes: [] + ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the kvrocks; replicas container(s) + ## + extraVolumeMounts: [] + ## @param replica.sidecars Add additional sidecar containers to the kvrocks; replicas pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param replica.initContainers Add additional init containers to the kvrocks; replicas pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on kvrocks; replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. 
+ ## + medium: "" + ## @param replica.persistence.path The path the volume will be mounted at on kvrocks; replicas containers + ## NOTE: Useful when using different kvrocks; images + ## + path: /data + ## @param replica.persistence.subPath The subdirectory of the volume to mount on kvrocks; replicas containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param replica.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param replica.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param replica.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param replica.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param replica.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param replica.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## kvrocks; replicas service parameters + ## + service: + ## @param 
replica.service.type kvrocks; replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.kvrocks kvrocks; replicas service port + ## + ports: + kvrocks: 6379 + ## @param replica.service.nodePorts.kvrocks Node port for kvrocks; replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + kvrocks: "" + ## @param replica.service.externalTrafficPolicy kvrocks; replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP kvrocks; replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP kvrocks; replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges kvrocks; replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for kvrocks; replicas service + ## + annotations: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the kvrocks-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + +## @section kvrocks; Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use kvrocks; Sentinel on kvrocks; pods. 
+ ## IMPORTANT: this will disable the master and replicas services and + ## create a single kvrocks; service exposing both the kvrocks and Sentinel ports + ## + enabled: true + ## Bitnami kvrocks; Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/sentinel/tags/ + ## @param sentinel.image.registry kvrocks; Sentinel image registry + ## @param sentinel.image.repository kvrocks; Sentinel image repository + ## @param sentinel.image.tag kvrocks; Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.pullPolicy kvrocks; Sentinel image pull policy + ## @param sentinel.image.pullSecrets kvrocks; Sentinel image pull secrets + ## @param sentinel.image.debug Enable image debug mode + ## + ## todo: support both of amd64 and arm64 + image: + registry: registry.drycc.cc + repository: drycc-addons/redis-sentinel + tag: "7.0" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param sentinel.masterSet Master set name + ## + masterSet: mymaster + ## @param sentinel.quorum Sentinel Quorum + ## + quorum: 2 + ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. + ## NOTE: This is directly related to the startupProbes which are configured to run every 10 seconds for a total of 22 failures. If adjusting this value, also adjust the startupProbes. 
+ getMasterTimeout: 220 + ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. + ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. + ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. + ## + automateClusterRecovery: false + ## Sentinel timing restrictions + ## @param sentinel.downAfterMilliseconds Timeout for detecting a kvrocks; node is down + ## @param sentinel.failoverTimeout Timeout for performing a election failover + ## + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover + ## + parallelSyncs: 1 + ## @param sentinel.configuration Configuration for kvrocks; Sentinel nodes + ## ref: https://kvrocks.io/topics/sentinel + ## + configuration: "" + ## @param sentinel.command Override default container command (useful when using custom images) + ## + command: [] + ## @param sentinel.args Override default container args (useful when using custom images) + ## + args: [] + ## @param sentinel.preExecCmds Additional commands to run prior to starting kvrocks; Sentinel + ## + preExecCmds: [] + ## @param sentinel.extraEnvVars Array with extra environment variables to add to kvrocks; Sentinel nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for kvrocks; Sentinel nodes + ## + extraEnvVarsCM: "" + ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for kvrocks; Sentinel nodes + ## + extraEnvVarsSecret: "" + ## @param sentinel.externalMaster.enabled Use external 
master for bootstrapping + ## @param sentinel.externalMaster.host External master host to bootstrap from + ## @param sentinel.externalMaster.port Port for kvrocks service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param sentinel.containerPorts.sentinel Container port to open on kvrocks; Sentinel nodes + ## + containerPorts: + sentinel: 26379 + ## Configure extra options for kvrocks; containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param sentinel.startupProbe.enabled Enable startupProbe on kvrocks; Sentinel nodes + ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe + ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param sentinel.livenessProbe.enabled Enable livenessProbe on kvrocks; Sentinel nodes + ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.readinessProbe.enabled Enable 
readinessProbe on kvrocks; Sentinel nodes + ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on kvrocks; sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: true + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param sentinel.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param sentinel.persistence.selector Additional labels to match for the PVC + ## 
e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param sentinel.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param sentinel.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the kvrocks; Sentinel containers + ## @param sentinel.resources.requests The requested resources for the kvrocks; Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled kvrocks; Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set kvrocks; Sentinel containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param sentinel.lifecycleHooks for the kvrocks; sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the kvrocks; Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts 
Optionally specify extra list of additional volumeMounts for the kvrocks; Sentinel container(s) + ## + extraVolumeMounts: [] + ## kvrocks; Sentinel service parameters + ## + service: + ## @param sentinel.service.type kvrocks; Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.ports.kvrocks kvrocks; service port for kvrocks; + ## @param sentinel.service.ports.sentinel kvrocks; service port for kvrocks; Sentinel + ## + ports: + kvrocks: 6379 + sentinel: 26379 + proxy: 36379 + ## @param sentinel.service.nodePorts.kvrocks Node port for kvrocks; + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.kvrocks and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + kvrocks: "" + proxy: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy kvrocks; Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP kvrocks; Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP kvrocks; Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges kvrocks; Sentinel service Load Balancer sources + ## 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for kvrocks; Sentinel service + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the kvrocks-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section kvrocks; proxy configuration parameters +## + +proxy: + ## @param proxy.enabled Use kvrocks; proxy on kvrocks; pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single kvrocks; service exposing both the kvrocks and Sentinel ports + ## + enabled: true + ## Bitnami kvrocks; proxy image version + ## ref: https://hub.docker.com/r/bitnami/sentinel/tags/ + ## @param proxy.image.registry kvrocks; proxy image registry + ## @param proxy.image.repository kvrocks; proxy image repository + ## @param proxy.image.tag kvrocks; proxy image tag (immutable tags are recommended) + ## @param proxy.image.pullPolicy kvrocks; proxy image pull policy + ## @param proxy.image.pullSecrets kvrocks; proxy image pull secrets + ## @param proxy.image.debug Enable image debug mode + ## + ## todo: support both of amd64 and arm64 + image: + registry: registry.drycc.cc + repository: drycc-addons/redis-sentinel + tag: "7.0" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param proxy.command Override default container command (useful when using custom images) + ## + command: [] + ## @param proxy.args Override default container args (useful when using custom images) + ## + args: [] + # max-procs + maxProcs: 1 + ## @param proxy.preExecCmds Additional commands to run prior to starting kvrocks; proxy + ## + preExecCmds: [] + ## Configure extra options for kvrocks; containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param proxy.startupProbe.enabled Enable startupProbe on kvrocks; proxy nodes + ## @param proxy.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param proxy.startupProbe.periodSeconds Period seconds for startupProbe + ## @param proxy.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param proxy.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param proxy.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param proxy.livenessProbe.enabled Enable livenessProbe on kvrocks; proxy nodes + ## @param proxy.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param proxy.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param proxy.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param proxy.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param proxy.livenessProbe.successThreshold Success threshold for livenessProbe + ## + 
livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param proxy.readinessProbe.enabled Enable readinessProbe on kvrocks; proxy nodes + ## @param proxy.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param proxy.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param proxy.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param proxy.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param proxy.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## kvrocks; proxy resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the kvrocks; proxy containers + ## @param proxy.resources.requests The requested resources for the kvrocks; proxy containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param proxy.containerSecurityContext.enabled Enabled kvrocks; proxy containers' Security Context + ## @param proxy.containerSecurityContext.runAsUser Set kvrocks; proxy containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param proxy.containerPorts.proxy Container port to open on kvrocks; proxy nodes + ## + containerPorts: + proxy: 36379 + +## @section Other Parameters +## + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of
NetworkPolicy resources + ## + enabled: true + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## kvrocks; is listening on. When true, kvrocks; will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + allowCurrentNamespace: true + allowNamespaces: [] + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## kvrocks; Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose kvrocks; metrics + ## + enabled: false + ## Bitnami kvrocks; Exporter image + ## ref: 
https://hub.docker.com/r/bitnami/kvrocks-exporter/tags/ + ## @param metrics.image.registry kvrocks; Exporter image registry + ## @param metrics.image.repository kvrocks; Exporter image repository + ## @param metrics.image.tag kvrocks; kvrocks; Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy kvrocks; Exporter image pull policy + ## @param metrics.image.pullSecrets kvrocks; Exporter image pull secrets + ## + ## todo: support both of amd64 and arm64 + image: + registry: registry.drycc.cc + repository: drycc-addons/kvrocks + tag: "2.10" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.command Override default metrics container init command (useful when using custom images) + ## + command: [] + ## @param metrics.kvrocksTargetHost A way to specify an alternative kvrocks; hostname + ## Useful for certificate CN/SAN matching + ## + kvrocksTargetHost: "localhost" + ## @param metrics.extraArgs Extra arguments for kvrocks; exporter, for example: + ## e.g.: + ## extraArgs: + ## check-keys: myKey,myOtherKey + ## + extraArgs: {} + ## @param metrics.extraEnvVars Array with extra environment variables to add to kvrocks; exporter + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.containerSecurityContext.enabled Enabled kvrocks; exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set kvrocks; exporter containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 
+ ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the kvrocks; metrics sidecar + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the kvrocks; metrics sidecar + ## + extraVolumeMounts: [] + ## kvrocks; exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the kvrocks; exporter container + ## @param metrics.resources.requests The requested resources for the kvrocks; exporter container + ## + resources: + limits: {} + requests: {} + ## @param metrics.podLabels Extra labels for kvrocks; exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations [object] Annotations for kvrocks; exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + ## kvrocks; exporter service parameters + ## + service: + ## @param metrics.service.type kvrocks; exporter service type + ## + type: ClusterIP + ## @param metrics.service.port kvrocks; exporter service port + ## + port: 9121 + ## @param metrics.service.externalTrafficPolicy kvrocks; exporter service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param metrics.service.loadBalancerIP kvrocks; exporter service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param metrics.service.loadBalancerSourceRanges kvrocks; exporter service 
Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for kvrocks; exporter service + ## + annotations: {} + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
+ ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: kvrocksDown + ## expr: kvrocks_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: kvrocks; instance {{ "{{ $labels.instance }}" }} down + ## description: kvrocks; instance {{ "{{ $labels.instance }}" }} is down + ## - alert: kvrocksMemoryHigh + ## expr: > + ## kvrocks_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## kvrocks_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: kvrocks; instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## kvrocks; instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + ## - alert: kvrocksKeyEviction + ## expr: | + ## increase(kvrocks_evicted_keys_total{service="{{ template "common.names.fullname" . 
}}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: kvrocks; instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## kvrocks; instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + ## + rules: [] + +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param volumePermissions.image.registry Bitnami Shell image registry + ## @param volumePermissions.image.repository Bitnami Shell image repository + ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy + ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/base + tag: "bookworm" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## init-sysctl container parameters +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctl: + ## @param sysctl.enabled Enable init container to modify Kernel settings + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param sysctl.image.registry Bitnami Shell image registry + ## @param sysctl.image.repository Bitnami Shell image repository + ## @param sysctl.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param sysctl.image.pullPolicy Bitnami Shell image pull policy + ## @param sysctl.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/base + tag: "bookworm" + pullPolicy: IfNotPresent + ## Optionally specify an array 
of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## e.g:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## @param sysctl.command Override default init-sysctl container command (useful when using custom images)
+  ##
+  command: []
+  ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys`
+  ##
+  mountHostSys: false
+  ## Init container's resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param sysctl.resources.limits The resources limits for the init container
+  ## @param sysctl.resources.requests The requested resources for the init container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+
+## @section useExternalDNS Parameters
+##
+## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable.
+## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled.
+## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled.
+## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release.
+##
+useExternalDNS:
+  enabled: false
+  suffix: ""
+  annotationKey: external-dns.alpha.kubernetes.io/
+  additionalAnnotations: {}
diff --git a/addons/kvrocks/2.10/meta.yaml b/addons/kvrocks/2.10/meta.yaml
new file mode 100644
index 00000000..5029ca8d
--- /dev/null
+++ b/addons/kvrocks/2.10/meta.yaml
@@ -0,0 +1,54 @@
+name: kvrocks-2.10
+version: "2.10"
+id: 1fb81154-606f-4b54-9aab-5c3aa429b8b2
+description: "kvrocks-2.10."
+displayName: "kvrocks-2.10"
+metadata:
+  displayName: "kvrocks-2.10"
+  provider:
+    name: drycc
+  supportURL: https://kvrocks.apache.org/
+  documentationURL: https://github.com/drycc-addons/addons/tree/main/addons/kvrocks/2.10/chart/kvrocks
+tags: kvrocks
+bindable: true
+instances_retrievable: true
+bindings_retrievable: true
+plan_updateable: true
+allow_parameters:
+- name: "auth.password"
+  required: false
+  description: "auth.password config for values.yaml"
+- name: "commonConfiguration"
+  required: false
+  description: "commonConfiguration config for values.yaml"
+- name: "networkPolicy.allowNamespaces"
+  required: false
+  description: "networkPolicy allowNamespaces config for values.yaml"
+- name: "master.nodeSelector"
+  required: false
+  description: "master nodeSelector config for values.yaml"
+- name: "master.service.type"
+  required: false
+  description: "master service type config for values.yaml"
+- name: "replica.nodeSelector"
+  required: false
+  description: "replica nodeSelector config for values.yaml"
+- name: "replica.service.type"
+  required: false
+  description: "replica service type config for values.yaml"
+- name: "sentinel.service.type"
+  required: false
+  description: "sentinel service type config for values.yaml"
+- name: "sentinel.extraEnvVars"
+  required: false
+  description: "sentinel extraEnvVars config for values.yaml"
+- name: "sentinel.enabled"
+  required: false
+  description: "sentinel enabled type config for values.yaml"
+- name: "proxy.enabled"
+  required: false
+  description: "proxy enabled type config for values.yaml"
+- name: "metrics.enabled"
+  required: false
+  description: "metrics enabled or not config for values.yaml"
+archive: false
diff --git a/addons/kvrocks/2.10/plans/standard-16c32g1024/bind.yaml b/addons/kvrocks/2.10/plans/standard-16c32g1024/bind.yaml
new file mode 100644
index 00000000..4f3acf2a
--- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-16c32g1024/bind.yaml @@ -0,0 +1,105 @@ +credential: + {{- if not .Values.sentinel.enabled }} + {{ if (eq .Values.master.service.type "LoadBalancer") }} + - name: EXTERNAL_MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: MASTER_DOMAIN + value: {{ printf "%s-master" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: MASTER_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + + - name: KVROCKS_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} + {{ if (eq .Values.replica.service.type "LoadBalancer") }} + - name: EXTERNAL_REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: REPLICAS_DOMAIN + value: {{ printf "%s-replicas" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: REPLICAS_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) 
}} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + {{ if (eq .Values.sentinel.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: SENTINEL_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-sentinel")].port }' + + {{- if.Values.proxy.enabled }} + - name: PROXY_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-proxy")].port }' + {{- end }} + {{- end }} + + {{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }} + - name: KVROCKS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.kvrocks-password }' + {{- end }} + + {{- if and .Values.auth.enabled .Values.auth.sentinel (not .Values.auth.existingSecret) }} + - name: SENTINEL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.kvrocks-password }' + {{- end }} diff --git a/addons/kvrocks/2.10/plans/standard-16c32g1024/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-16c32g1024/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-16c32g1024/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-16c32g1024/meta.yaml b/addons/kvrocks/2.10/plans/standard-16c32g1024/meta.yaml new file mode 100644 index 00000000..adfd568e --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-16c32g1024/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c32g1024" +id: 7ea1ccde-3495-4388-8160-c2b6b443a938 +description: "Kvrocks standard-16c32g1024 plan which limit resources 16 core, memory size 32Gi and persistence size 1024Gi." 
+displayName: "standard-16c32g1024" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml b/addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml new file mode 100644 index 00000000..52424a5b --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml @@ -0,0 +1,138 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kvrocks-standard-16c32g1024 + +## @section Kvrocks; master configuration parameters +## + +master: + ## Kvrocks; master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Kvrocks; master containers + ## @param master.resources.requests The requested resources for the Kvrocks; master containers + ## + resources: + limits: + cpu: 16 + memory: 32Gi + requests: + cpu: 8 + memory: 16Gi + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Kvrocks; master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.path The path the volume will be mounted at on Kvrocks; master containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 1Ti + +## @section Kvrocks; replicas configuration parameters +## + +replica: + ## @param 
replica.replicaCount Number of Kvrocks; replicas to deploy + ## + replicaCount: 3 + + ## Kvrocks; replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Kvrocks; replicas containers + ## @param replica.resources.requests The requested resources for the Kvrocks; replicas containers + ## + resources: + limits: + cpu: 16 + memory: 32Gi + requests: + cpu: 8 + memory: 16Gi + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Kvrocks; replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.path The path the volume will be mounted at on Kvrocks; replicas containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 1Ti + +sentinel: + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Kvrocks; sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: true + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName 
spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 128Mi + + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +proxy: + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param proxy.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-1c2g64/bind.yaml b/addons/kvrocks/2.10/plans/standard-1c2g64/bind.yaml new file mode 100644 index 00000000..4f3acf2a --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-1c2g64/bind.yaml @@ -0,0 +1,105 @@ +credential: + {{- if not .Values.sentinel.enabled }} + {{ if (eq .Values.master.service.type "LoadBalancer") }} + - name: EXTERNAL_MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: MASTER_DOMAIN + value: {{ printf "%s-master" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) 
}} + jsonpath: '{ .spec.clusterIP }' + + - name: MASTER_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + + - name: KVROCKS_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} + {{ if (eq .Values.replica.service.type "LoadBalancer") }} + - name: EXTERNAL_REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: REPLICAS_DOMAIN + value: {{ printf "%s-replicas" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: REPLICAS_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + {{ if (eq .Values.sentinel.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: SENTINEL_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}}
+    jsonpath: '{ .spec.ports[?(@.name=="tcp-sentinel")].port }'
+
+  {{- if .Values.proxy.enabled }}
+  - name: PROXY_PORT
+    valueFrom:
+      serviceRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .spec.ports[?(@.name=="tcp-proxy")].port }'
+  {{- end }}
+  {{- end }}
+
+  {{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }}
+  - name: KVROCKS_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
+
+  {{- if and .Values.auth.enabled .Values.auth.sentinel (not .Values.auth.existingSecret) }}
+  - name: SENTINEL_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
diff --git a/addons/kvrocks/2.10/plans/standard-1c2g64/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-1c2g64/create-instance-schema.json
new file mode 100644
index 00000000..66ebbaa0
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-1c2g64/create-instance-schema.json
@@ -0,0 +1,12 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "imagePullPolicy": {
+            "type": "string",
+            "enum": ["Always", "IfNotPresent", "Never"],
+            "default": "IfNotPresent",
+            "title": "Image pull policy"
+        }
+    }
+}
\ No newline at end of file
diff --git a/addons/kvrocks/2.10/plans/standard-1c2g64/meta.yaml b/addons/kvrocks/2.10/plans/standard-1c2g64/meta.yaml
new file mode 100644
index 00000000..40f92b0b
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-1c2g64/meta.yaml
@@ -0,0 +1,6 @@
+name: "standard-1c2g64"
+id: 738baa82-8f6c-46cc-a4ea-7860cc5e02ce
+description: "Kvrocks standard-1c2g64 plan which limit resources 1 core, memory size 2Gi and persistence size 64Gi."
+displayName: "standard-1c2g64" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml b/addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml new file mode 100644 index 00000000..80019be2 --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml @@ -0,0 +1,139 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kvrocks-standard-1c2g64 + +## @section Kvrocks; master configuration parameters +## + +master: + ## Kvrocks; master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Kvrocks; master containers + ## @param master.resources.requests The requested resources for the Kvrocks; master containers + ## + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Kvrocks; master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.path The path the volume will be mounted at on Kvrocks; master containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 64Gi + +## @section Kvrocks; replicas configuration parameters +## + +replica: + ## @param replica.replicaCount 
Number of Kvrocks; replicas to deploy + ## + replicaCount: 3 + + ## Kvrocks; replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Kvrocks; replicas containers + ## @param replica.resources.requests The requested resources for the Kvrocks; replicas containers + ## + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Kvrocks; replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.path The path the volume will be mounted at on Kvrocks; replicas containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 64Gi + +sentinel: + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Kvrocks; sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: true + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, 
choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 128Mi + + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +proxy: + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param proxy.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-2c4g128/bind.yaml b/addons/kvrocks/2.10/plans/standard-2c4g128/bind.yaml new file mode 100644 index 00000000..4f3acf2a --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-2c4g128/bind.yaml @@ -0,0 +1,105 @@ +credential: + {{- if not .Values.sentinel.enabled }} + {{ if (eq .Values.master.service.type "LoadBalancer") }} + - name: EXTERNAL_MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: MASTER_DOMAIN + value: {{ printf "%s-master" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) 
}} + jsonpath: '{ .spec.clusterIP }' + + - name: MASTER_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + + - name: KVROCKS_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} + {{ if (eq .Values.replica.service.type "LoadBalancer") }} + - name: EXTERNAL_REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: REPLICAS_DOMAIN + value: {{ printf "%s-replicas" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: REPLICAS_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + {{ if (eq .Values.sentinel.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: SENTINEL_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}}
+    jsonpath: '{ .spec.ports[?(@.name=="tcp-sentinel")].port }'
+
+  {{- if .Values.proxy.enabled }}
+  - name: PROXY_PORT
+    valueFrom:
+      serviceRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .spec.ports[?(@.name=="tcp-proxy")].port }'
+  {{- end }}
+  {{- end }}
+
+  {{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }}
+  - name: KVROCKS_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
+
+  {{- if and .Values.auth.enabled .Values.auth.sentinel (not .Values.auth.existingSecret) }}
+  - name: SENTINEL_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
diff --git a/addons/kvrocks/2.10/plans/standard-2c4g128/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-2c4g128/create-instance-schema.json
new file mode 100644
index 00000000..66ebbaa0
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-2c4g128/create-instance-schema.json
@@ -0,0 +1,12 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "imagePullPolicy": {
+            "type": "string",
+            "enum": ["Always", "IfNotPresent", "Never"],
+            "default": "IfNotPresent",
+            "title": "Image pull policy"
+        }
+    }
+}
\ No newline at end of file
diff --git a/addons/kvrocks/2.10/plans/standard-2c4g128/meta.yaml b/addons/kvrocks/2.10/plans/standard-2c4g128/meta.yaml
new file mode 100644
index 00000000..eefa326b
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-2c4g128/meta.yaml
@@ -0,0 +1,6 @@
+name: "standard-2c4g128"
+id: 4e315859-7147-4b81-b923-987de24beef6
+description: "Kvrocks standard-2c4g128 plan which limit resources 2 core, memory size 4Gi and persistence size 128Gi."
+displayName: "standard-2c4g128" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml b/addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml new file mode 100644 index 00000000..4fd489b1 --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml @@ -0,0 +1,139 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kvrocks-standard-2c4g128 + +## @section Kvrocks; master configuration parameters +## + +master: + ## Kvrocks; master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Kvrocks; master containers + ## @param master.resources.requests The requested resources for the Kvrocks; master containers + ## + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Kvrocks; master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.path The path the volume will be mounted at on Kvrocks; master containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 128Gi + +## @section Kvrocks; replicas configuration parameters +## + +replica: + ## @param 
replica.replicaCount Number of Kvrocks; replicas to deploy + ## + replicaCount: 3 + + ## Kvrocks; replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Kvrocks; replicas containers + ## @param replica.resources.requests The requested resources for the Kvrocks; replicas containers + ## + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Kvrocks; replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.path The path the volume will be mounted at on Kvrocks; replicas containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 128Gi + +sentinel: + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Kvrocks; sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: true + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName 
spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 128Mi + + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +proxy: + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param proxy.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-4c8g256/bind.yaml b/addons/kvrocks/2.10/plans/standard-4c8g256/bind.yaml new file mode 100644 index 00000000..4f3acf2a --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-4c8g256/bind.yaml @@ -0,0 +1,105 @@ +credential: + {{- if not .Values.sentinel.enabled }} + {{ if (eq .Values.master.service.type "LoadBalancer") }} + - name: EXTERNAL_MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: MASTER_DOMAIN + value: {{ printf "%s-master" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) 
}} + jsonpath: '{ .spec.clusterIP }' + + - name: MASTER_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + + - name: KVROCKS_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} + {{ if (eq .Values.replica.service.type "LoadBalancer") }} + - name: EXTERNAL_REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: REPLICAS_DOMAIN + value: {{ printf "%s-replicas" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: REPLICAS_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + {{ if (eq .Values.sentinel.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: SENTINEL_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}}
+    jsonpath: '{ .spec.ports[?(@.name=="tcp-sentinel")].port }'
+
+  {{- if .Values.proxy.enabled }}
+  - name: PROXY_PORT
+    valueFrom:
+      serviceRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .spec.ports[?(@.name=="tcp-proxy")].port }'
+  {{- end }}
+  {{- end }}
+
+  {{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }}
+  - name: KVROCKS_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
+
+  {{- if and .Values.auth.enabled .Values.auth.sentinel (not .Values.auth.existingSecret) }}
+  - name: SENTINEL_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
diff --git a/addons/kvrocks/2.10/plans/standard-4c8g256/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-4c8g256/create-instance-schema.json
new file mode 100644
index 00000000..66ebbaa0
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-4c8g256/create-instance-schema.json
@@ -0,0 +1,12 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "imagePullPolicy": {
+            "type": "string",
+            "enum": ["Always", "IfNotPresent", "Never"],
+            "default": "IfNotPresent",
+            "title": "Image pull policy"
+        }
+    }
+}
\ No newline at end of file
diff --git a/addons/kvrocks/2.10/plans/standard-4c8g256/meta.yaml b/addons/kvrocks/2.10/plans/standard-4c8g256/meta.yaml
new file mode 100644
index 00000000..00c7904e
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-4c8g256/meta.yaml
@@ -0,0 +1,6 @@
+name: "standard-4c8g256"
+id: 7dc09ac3-7d74-4835-a307-848ffb09a96a
+description: "Kvrocks standard-4c8g256 plan which limit resources 4 core, memory size 8Gi and persistence size 256Gi."
+displayName: "standard-4c8g256" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml b/addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml new file mode 100644 index 00000000..024daf43 --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml @@ -0,0 +1,138 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kvrocks-standard-4c8g256 + +## @section Kvrocks; master configuration parameters +## + +master: + ## Kvrocks; master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Kvrocks; master containers + ## @param master.resources.requests The requested resources for the Kvrocks; master containers + ## + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 2 + memory: 4Gi + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Kvrocks; master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.path The path the volume will be mounted at on Kvrocks; master containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 256Gi + +## @section Kvrocks; replicas configuration parameters +## + +replica: + ## @param 
replica.replicaCount Number of Kvrocks; replicas to deploy + ## + replicaCount: 3 + + ## Kvrocks; replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Kvrocks; replicas containers + ## @param replica.resources.requests The requested resources for the Kvrocks; replicas containers + ## + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 2 + memory: 4Gi + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Kvrocks; replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.path The path the volume will be mounted at on Kvrocks; replicas containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 256Gi + +sentinel: + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Kvrocks; sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: true + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName 
spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 128Mi + + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +proxy: + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param proxy.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-8c16g512/bind.yaml b/addons/kvrocks/2.10/plans/standard-8c16g512/bind.yaml new file mode 100644 index 00000000..4f3acf2a --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-8c16g512/bind.yaml @@ -0,0 +1,105 @@ +credential: + {{- if not .Values.sentinel.enabled }} + {{ if (eq .Values.master.service.type "LoadBalancer") }} + - name: EXTERNAL_MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: MASTER_DOMAIN + value: {{ printf "%s-master" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) 
}} + jsonpath: '{ .spec.clusterIP }' + + - name: MASTER_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + + - name: KVROCKS_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} + {{ if (eq .Values.replica.service.type "LoadBalancer") }} + - name: EXTERNAL_REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: REPLICAS_DOMAIN + value: {{ printf "%s-replicas" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: REPLICAS_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: REPLICAS_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-kvrocks")].port }' + {{- end }} + + {{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + {{ if (eq .Values.sentinel.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: SENTINEL_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}}
+    jsonpath: '{ .spec.ports[?(@.name=="tcp-sentinel")].port }'
+
+  {{- if .Values.proxy.enabled }}
+  - name: PROXY_PORT
+    valueFrom:
+      serviceRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .spec.ports[?(@.name=="tcp-proxy")].port }'
+  {{- end }}
+  {{- end }}
+
+  {{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }}
+  - name: KVROCKS_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
+
+  {{- if and .Values.auth.enabled .Values.auth.sentinel (not .Values.auth.existingSecret) }}
+  - name: SENTINEL_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: {{ template "common.names.fullname" . }}
+        jsonpath: '{ .data.kvrocks-password }'
+  {{- end }}
diff --git a/addons/kvrocks/2.10/plans/standard-8c16g512/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-8c16g512/create-instance-schema.json
new file mode 100644
index 00000000..66ebbaa0
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-8c16g512/create-instance-schema.json
@@ -0,0 +1,12 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "imagePullPolicy": {
+            "type": "string",
+            "enum": ["Always", "IfNotPresent", "Never"],
+            "default": "IfNotPresent",
+            "title": "Image pull policy"
+        }
+    }
+}
\ No newline at end of file
diff --git a/addons/kvrocks/2.10/plans/standard-8c16g512/meta.yaml b/addons/kvrocks/2.10/plans/standard-8c16g512/meta.yaml
new file mode 100644
index 00000000..3b5f7716
--- /dev/null
+++ b/addons/kvrocks/2.10/plans/standard-8c16g512/meta.yaml
@@ -0,0 +1,6 @@
+name: "standard-8c16g512"
+id: ba00c618-527c-457b-abb5-7ceb7984de57
+description: "Kvrocks standard-8c16g512 plan which limit resources 8 core, memory size 16Gi and persistence size 512Gi."
+displayName: "standard-8c16g512" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml b/addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml new file mode 100644 index 00000000..5d563525 --- /dev/null +++ b/addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml @@ -0,0 +1,138 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kvrocks-standard-8c16g512 + +## @section Kvrocks; master configuration parameters +## + +master: + ## Kvrocks; master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Kvrocks; master containers + ## @param master.resources.requests The requested resources for the Kvrocks; master containers + ## + resources: + limits: + cpu: 8 + memory: 16Gi + requests: + cpu: 4 + memory: 8Gi + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Kvrocks; master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.path The path the volume will be mounted at on Kvrocks; master containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 512Gi + +## @section Kvrocks; replicas configuration parameters +## + +replica: + ## @param 
replica.replicaCount Number of Kvrocks; replicas to deploy + ## + replicaCount: 3 + + ## Kvrocks; replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Kvrocks; replicas containers + ## @param replica.resources.requests The requested resources for the Kvrocks; replicas containers + ## + resources: + limits: + cpu: 8 + memory: 16Gi + requests: + cpu: 4 + memory: 8Gi + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Kvrocks; replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.path The path the volume will be mounted at on Kvrocks; replicas containers + ## NOTE: Useful when using different Kvrocks; images + ## + path: /drycc/kvrocks/data + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 512Gi + +sentinel: + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Kvrocks; sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: true + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName 
spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 128Mi + + ## Kvrocks; Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Kvrocks; Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Kvrocks; Sentinel containers + ## + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +proxy: + ## Kvrocks; proxy resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the Kvrocks; proxy containers + ## @param proxy.resources.requests The requested resources for the Kvrocks; proxy containers + ## + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi \ No newline at end of file From ff35d9f25d25aaef59bafbf1f7d4ec05a63f38a8 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Wed, 26 Feb 2025 17:55:26 +0800 Subject: [PATCH 39/93] chore(minio): update standard-v4s3096 plan --- addons/minio/2023/plans/standard-v4s3096/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/minio/2023/plans/standard-v4s3096/values.yaml b/addons/minio/2023/plans/standard-v4s3096/values.yaml index 5f2ee8d3..64b0212d 100644 --- a/addons/minio/2023/plans/standard-v4s3096/values.yaml +++ b/addons/minio/2023/plans/standard-v4s3096/values.yaml @@ -45,4 +45,4 @@ persistence: - ReadWriteOnce ## @param persistence.size PVC Storage Request for MinIO® data volume ## - size: 2Ti + size: 3Ti From 0c1dc4ca5a88b1d9edd2a40b567429a266479dfa Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 3 Mar 2025 13:30:55 +0800 Subject: [PATCH 40/93] feat(rabbitmq): 
support 4.0 version --- addons/index.yaml | 2 + addons/rabbitmq/3.12/meta.yaml | 3 + .../3.12/plans/standard-16c32g3w/bind.yaml | 9 + .../3.12/plans/standard-2c4g3w/bind.yaml | 9 + .../3.12/plans/standard-4c8g3w/bind.yaml | 9 + .../3.12/plans/standard-8c16g3w/bind.yaml | 9 + .../4.0/chart/rabbitmq-4.0/Chart.yaml | 23 + .../rabbitmq/4.0/chart/rabbitmq-4.0/README.md | 712 +++++++ .../chart/rabbitmq-4.0/templates/NOTES.txt | 163 ++ .../chart/rabbitmq-4.0/templates/_helpers.tpl | 267 +++ .../rabbitmq-4.0/templates/config-secret.yaml | 21 + .../rabbitmq-4.0/templates/extra-list.yaml | 4 + .../templates/ingress-tls-secrets.yaml | 44 + .../chart/rabbitmq-4.0/templates/ingress.yaml | 54 + .../templates/init-configmap.yaml | 13 + .../rabbitmq-4.0/templates/networkpolicy.yaml | 105 ++ .../4.0/chart/rabbitmq-4.0/templates/pdb.yaml | 21 + .../templates/prometheusrule.yaml | 20 + .../chart/rabbitmq-4.0/templates/role.yaml | 21 + .../rabbitmq-4.0/templates/rolebinding.yaml | 18 + .../chart/rabbitmq-4.0/templates/secrets.yaml | 59 + .../templates/serviceaccount.yaml | 16 + .../templates/servicemonitor.yaml | 134 ++ .../rabbitmq-4.0/templates/statefulset.yaml | 524 ++++++ .../rabbitmq-4.0/templates/svc-headless.yaml | 40 + .../4.0/chart/rabbitmq-4.0/templates/svc.yaml | 106 ++ .../rabbitmq-4.0/templates/tls-secrets.yaml | 31 + .../rabbitmq-4.0/templates/validation.yaml | 2 + .../4.0/chart/rabbitmq-4.0/values.schema.json | 100 + .../4.0/chart/rabbitmq-4.0/values.yaml | 1633 +++++++++++++++++ addons/rabbitmq/4.0/meta.yaml | 51 + .../4.0/plans/standard-16c32g3w/bind.yaml | 55 + .../create-instance-schema.json | 12 + .../4.0/plans/standard-16c32g3w/meta.yaml | 6 + .../4.0/plans/standard-16c32g3w/values.yaml | 48 + .../4.0/plans/standard-2c4g3w/bind.yaml | 55 + .../create-instance-schema.json | 12 + .../4.0/plans/standard-2c4g3w/meta.yaml | 6 + .../4.0/plans/standard-2c4g3w/values.yaml | 48 + .../4.0/plans/standard-4c8g3w/bind.yaml | 55 + .../create-instance-schema.json | 12 + 
.../4.0/plans/standard-4c8g3w/meta.yaml | 6 + .../4.0/plans/standard-4c8g3w/values.yaml | 48 + .../4.0/plans/standard-8c16g3w/bind.yaml | 55 + .../create-instance-schema.json | 12 + .../4.0/plans/standard-8c16g3w/meta.yaml | 6 + .../4.0/plans/standard-8c16g3w/values.yaml | 48 + 47 files changed, 4707 insertions(+) create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/Chart.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/README.md create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/NOTES.txt create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/_helpers.tpl create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/config-secret.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/extra-list.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress-tls-secrets.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/init-configmap.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/networkpolicy.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/pdb.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/prometheusrule.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/role.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/rolebinding.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/secrets.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/serviceaccount.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/servicemonitor.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/statefulset.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc-headless.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc.yaml create mode 100644 
addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/tls-secrets.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/validation.yaml create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.schema.json create mode 100644 addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.yaml create mode 100644 addons/rabbitmq/4.0/meta.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-16c32g3w/bind.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-16c32g3w/create-instance-schema.json create mode 100644 addons/rabbitmq/4.0/plans/standard-16c32g3w/meta.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-2c4g3w/bind.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-2c4g3w/create-instance-schema.json create mode 100644 addons/rabbitmq/4.0/plans/standard-2c4g3w/meta.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-4c8g3w/bind.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-4c8g3w/create-instance-schema.json create mode 100644 addons/rabbitmq/4.0/plans/standard-4c8g3w/meta.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-8c16g3w/bind.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-8c16g3w/create-instance-schema.json create mode 100644 addons/rabbitmq/4.0/plans/standard-8c16g3w/meta.yaml create mode 100644 addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index 33cbbd9c..010b878c 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -15,6 +15,8 @@ entries: rabbitmq: - version: 3.12 description: "RabbitMQ is the most widely deployed open source message broker." + - version: "4.0" + description: "RabbitMQ is the most widely deployed open source message broker." 
redis: - version: 7.0 description: "Redis is an advanced key-value cache and store." diff --git a/addons/rabbitmq/3.12/meta.yaml b/addons/rabbitmq/3.12/meta.yaml index 7d2e5f0c..814c200f 100644 --- a/addons/rabbitmq/3.12/meta.yaml +++ b/addons/rabbitmq/3.12/meta.yaml @@ -15,6 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "auth" + required: false + description: "auth config for values.yaml" - name: "nodeSelector" required: false description: "nodeSelector config for values.yaml" diff --git a/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml index a17a74d4..6ca9efc1 100644 --- a/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-16c32g3w/bind.yaml @@ -44,3 +44,12 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .data.rabbitmq-password }' {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) }} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml index a17a74d4..6ca9efc1 100644 --- a/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-2c4g3w/bind.yaml @@ -44,3 +44,12 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .data.rabbitmq-password }' {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) 
}} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml index a17a74d4..6ca9efc1 100644 --- a/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-4c8g3w/bind.yaml @@ -44,3 +44,12 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .data.rabbitmq-password }' {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) }} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml b/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml index a17a74d4..6ca9efc1 100644 --- a/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml +++ b/addons/rabbitmq/3.12/plans/standard-8c16g3w/bind.yaml @@ -44,3 +44,12 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .data.rabbitmq-password }' {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) 
}} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/Chart.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/Chart.yaml new file mode 100644 index 00000000..2f154725 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: "4.0" +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.4 +description: RabbitMQ is an open source general-purpose message broker that is designed for consistent, highly-available messaging scenarios (both synchronous and asynchronous). +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- name: Broadcom, Inc. All Rights Reserved. + url: https://github.com/bitnami/charts +name: rabbitmq +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq +version: 15.3.3 diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/README.md b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/README.md new file mode 100644 index 00000000..99ad126c --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/README.md @@ -0,0 +1,712 @@ + + +# RabbitMQ packaged by Bitnami + +RabbitMQ is an open source general-purpose message broker that is designed for consistent, highly-available messaging scenarios (both synchronous and asynchronous). + +[Overview of RabbitMQ](https://www.rabbitmq.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. 
+ +## TL;DR + +```bash +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabbitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------------------------------------------------- | --------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.10.8-debian-11-r4` | +| `image.digest` | RabbitMQ image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| 
`extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `dnsPolicy` | DNS Policy for pod | `""` | +| `dnsConfig` | DNS Configuration pod | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
| `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). 
| `false` | +| `clustering.partitionHandling` | Switch Partition Handling Strategy. Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore` | `autoheal` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.file` | Name of the definitions file | `/app/load_definition.json` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `lifecycleHooks` | Overwrite livecycle for the RabbitMQ container(s) to automate configuration before or after startup | `{}` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `containerPorts.amqp` | | `5672` | +| `containerPorts.amqpTls` | | `5671` | +| `containerPorts.dist` | | `25672` | +| `containerPorts.manager` | | `15672` | +| `containerPorts.epmd` | | `4369` | +| `containerPorts.metrics` | | `9419` | +| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. 
| `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.uri` | LDAP connection string. | `""` | +| `ldap.servers` | List of LDAP servers hostnames. This is valid only if ldap.uri is not set | `[]` | +| `ldap.port` | LDAP servers port. This is valid only if ldap.uri is not set | `""` | +| `ldap.userDnPattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind. | `""` | +| `ldap.binddn` | DN of the account used to search in the LDAP server. | `""` | +| `ldap.bindpw` | Password for binddn account. | `""` | +| `ldap.basedn` | Base DN path where binddn account will search for the users. | `""` | +| `ldap.uidField` | Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration | `""` | +| `ldap.uidField` | Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration | `""` | +| `ldap.authorisationEnabled` | Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings | `false` | +| `ldap.tls.enabled` | Enabled TLS configuration. | `false` | +| `ldap.tls.startTls` | Use STARTTLS instead of LDAPS. | `false` | +| `ldap.tls.skipVerify` | Skip any SSL verification (hostanames or certificates) | `false` | +| `ldap.tls.verify` | Verify connection. Valid values are 'verify_peer' or 'verify_none' | `verify_peer` | +| `ldap.tls.certificatesMountPath` | Where LDAP certifcates are mounted. 
| `/opt/drycc/rabbitmq/ldap/certs` | +| `ldap.tls.certificatesSecret` | Secret with LDAP certificates. | `""` | +| `ldap.tls.CAFilename` | CA certificate filename. Should match with the CA entry key in the ldap.tls.certificatesSecret. | `""` | +| `ldap.tls.certFilename` | Client certificate filename to authenticate against the LDAP server. Should match with certificate the entry key in the ldap.tls.certificatesSecret. | `""` | +| `ldap.tls.certKeyFilename` | Client Key filename to authenticate against the LDAP server. Should match with certificate the entry key in the ldap.tls.certificatesSecret. | `""` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategy.type` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. 
Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set RabbitMQ pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled RabbitMQ containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set RabbitMQ containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set RabbitMQ container's Security Context runAsNonRoot | `true` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| 
`livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `30` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `20` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------ | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created 
serviceAccount | `""` | +| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `true` | +| `serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ------------------------------------------------ | -------------------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessModes` | PVC Access Modes for RabbitMQ data volume | `["ReadWriteOnce"]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.mountPath` | The path the volume will be mounted at | `/bitnami/rabbitmq/mnesia` | +| `persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.annotations` | Persistence annotations. Evaluated as a template | `{}` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` |
+| `service.distPortEnabled` | Erlang distribution server port | `true` |
+| `service.managerPortEnabled` | RabbitMQ Manager port | `true` |
+| `service.epmdPortEnabled` | RabbitMQ EPMD Discovery service port | `true` |
+| `service.ports.amqp` | Amqp service port | `5672` |
+| `service.ports.amqpTls` | Amqp TLS service port | `5671` |
+| `service.ports.dist` | Erlang distribution service port | `25672` |
+| `service.ports.manager` | RabbitMQ Manager service port | `15672` |
+| `service.ports.metrics` | RabbitMQ Prometheus metrics service port | `9419` |
+| `service.ports.epmd` | EPMD Discovery service port | `4369` |
+| `service.portNames.amqp` | Amqp service port name | `amqp` |
+| `service.portNames.amqpTls` | Amqp TLS service port name | `amqp-ssl` |
+| `service.portNames.dist` | Erlang distribution service port name | `dist` |
+| `service.portNames.manager` | RabbitMQ Manager service port name | `http-stats` |
+| `service.portNames.metrics` | RabbitMQ Prometheus metrics service port name | `metrics` |
+| `service.portNames.epmd` | EPMD Discovery service port name | `epmd` |
+| `service.nodePorts.amqp` | Node port for Amqp | `""` |
+| `service.nodePorts.amqpTls` | Node port for Amqp TLS | `""` |
+| `service.nodePorts.dist` | Node port for Erlang distribution | `""` |
+| `service.nodePorts.manager` | Node port for RabbitMQ Manager | `""` |
+| `service.nodePorts.epmd` | Node port for EPMD Discovery | `""` |
+| `service.nodePorts.metrics` | Node port for RabbitMQ Prometheus metrics | `""` |
+| `service.extraPorts` | Extra ports to expose in the service | `[]` |
+| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` |
+| `service.externalIPs` | Set the ExternalIPs | `[]` |
+| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` |
+| `service.clusterIP` | Kubernetes service Cluster IP | `""` |
+|
`service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraRules` | The list of additional rules to be added to this ingress record. Evaluated as a template | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. 
| `[]` |
+| `ingress.secrets` | Custom TLS certificates as secrets | `[]` |
+| `ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
+| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. | `[]` |
+
+
+### Metrics Parameters
+
+| Name | Description | Value |
+| ------------------------------------------ | -------------------------------------------------------------------------------------- | --------------------- |
+| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` |
+| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` |
+| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` |
+| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` |
+| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` |
+| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` |
+| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion.
| `[]` |
+| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` |
+| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` |
+| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` |
+| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrape metrics | `""` |
+| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` |
+| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` |
+| `metrics.serviceMonitor.annotations` | Extra annotations for the ServiceMonitor | `{}` |
+| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
+| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` |
+| `metrics.prometheusRule.rules` | List of rules, used as template by Helm.
| `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r38` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq). For more information please refer to the [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example,
+
+```bash
+$ helm install my-release \
+  --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \
+  my-repo/rabbitmq
+```
+
+The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install my-release -f values.yaml my-repo/rabbitmq
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Set pod affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart.
To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. + +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls-ingress/). 
+
+### Load custom definitions
+
+It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](https://www.rabbitmq.com/management.html#load-definitions).
+
+Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value.
+
+Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within the container at `/app`.
+
+> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters).
+
+If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values.
+
+Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/).
+
+### Configure LDAP support
+
+LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/).
+
+### Configure memory high watermark
+
+It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters.
To do so, you have two alternatives: + +* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. An example configuration is shown below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Advanced logging + +In case you want to configure RabbitMQ logging set `logs` value to false and set the log config in extraConfiguration following the [official documentation](https://www.rabbitmq.com/logging.html#log-file-location). 
+
+An example:
+
+```yaml
+logs: false # custom logging
+extraConfiguration: |
+  log.default.level = warning
+  log.file = false
+  log.console = true
+  log.console.level = warning
+  log.console.formatter = json
+```
+
+### Recover the cluster from complete shutdown
+
+> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand.
+
+The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover.
+
+This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. If that happens, update the pod management policy to recover a healthy state:
+
+```console
+$ kubectl delete statefulset STATEFULSET_NAME --cascade=false
+$ helm upgrade RELEASE_NAME my-repo/rabbitmq \
+  --set podManagementPolicy=Parallel \
+  --set replicaCount=NUMBER_OF_REPLICAS \
+  --set auth.password=PASSWORD \
+  --set auth.erlangCookie=ERLANG_COOKIE
+```
+
+For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests.
+
+If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown.
In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +$ helm upgrade RELEASE_NAME my-repo/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/drycc/rabbitmq/var/lib/rabbitmq/` path of the container. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME my-repo/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. 
+ +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release my-repo/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 10.0.0 + +This major version changes the default RabbitMQ image from 3.9.x to 3.10.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.9 to 3.10. 
+
+### To 9.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be aligned with the rest of the assets in the Bitnami charts repository.
+
+  .dist
+  .manager
+  .metrics
+  .epmd
+
+- `service.port` has been renamed as `service.ports.amqp`.
+- `service.portName` has been renamed as `service.portNames.amqp`.
+- `service.nodePort` has been renamed as `service.nodePorts.amqp`.
+- `service.tlsPort` has been renamed as `service.ports.amqpTls`.
+- `service.tlsPortName` has been renamed as `service.portNames.amqpTls`.
+- `service.tlsNodePort` has been renamed as `service.nodePorts.amqpTls`.
+- `service.epmdPortName` has been renamed as `service.portNames.epmd`.
+- `service.epmdNodePort` has been renamed as `service.nodePorts.epmd`.
+- `service.distPort` has been renamed as `service.ports.dist`.
+- `service.distPortName` has been renamed as `service.portNames.dist`.
+- `service.distNodePort` has been renamed as `service.nodePorts.dist`.
+- `service.managerPort` has been renamed as `service.ports.manager`.
+- `service.managerPortName` has been renamed as `service.portNames.manager`.
+- `service.managerNodePort` has been renamed as `service.nodePorts.manager`.
+- `service.metricsPort` has been renamed as `service.ports.metrics`.
+- `service.metricsPortName` has been renamed as `service.portNames.metrics`.
+- `service.metricsNodePort` has been renamed as `service.nodePorts.metrics`.
+- `persistence.volumes` has been removed, as it duplicates the parameter `extraVolumes`.
+- `ingress.certManager` has been removed.
+- `metrics.serviceMonitor.relabellings` has been replaced with `metrics.serviceMonitor.relabelings`, and it sets the field `relabelings` instead of `metricRelabelings`.
+- `metrics.serviceMonitor.additionalLabels` has been renamed as `metrics.serviceMonitor.labels`.
+- `updateStrategyType` has been removed, use the field `updateStrategy` instead, which is interpreted as a template.
+- The content of `podSecurityContext` and `containerSecurityContext` have been modified. +- The behavior of VolumePermissions has been modified to not change ownership of '.snapshot' and 'lost+found' +- Introduced the values `ContainerPorts.*`, separating the service and container ports configuration. + +### To 8.21.0 + +This new version of the chart bumps the RabbitMQ version to `3.9.1`. It is considered a minor release, and no breaking changes are expected. Additionally, RabbitMQ `3.9.X` nodes can run alongside `3.8.X` nodes. + +See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) and the [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) for further documentation. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. 
+ - `rabbitmq.setUlimitNofiles` is deprecated.
+ - `forceBoot.enabled` is renamed to `clustering.forceBoot`.
+ - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`.
+ - `metrics.port` is renamed to `service.metricsPort`.
+ - `service.extraContainerPorts` is renamed to `extraContainerPorts`.
+ - `service.nodeTlsPort` is renamed to `service.tlsNodePort`.
+ - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`.
+ - `rbacEnabled` -> deprecated in favor of `rbac.create`.
+ - New parameters: `serviceAccount.create`, and `serviceAccount.name`.
+ - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`.
+- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices.
+- Initialization logic now relies on the container.
+- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
+- The layout of the persistent volumes has changed (if using persistence). Action is required if preserving data through the upgrade is desired:
+ - The data has moved from `mnesia/` within the persistent volume to the root of the persistent volume
+ - The `config/` and `schema/` directories within the persistent volume are no longer used
+ - An init container can be used to move and clean up the persistent volumes. An example can be found [here](https://github.com/bitnami/charts/issues/10913#issuecomment-1169619513).
+ - Alternatively the value `persistence.subPath` can be overridden to be `mnesia` so that the directory layout is consistent with what it was previously.
+ - Note however that this will leave the unused `config/` and `schema/` directories within the persistent volume forever.
+
+Consequences:
+
+- Backwards compatibility is not guaranteed.
+- Compatibility with non-Bitnami images is not guaranteed anymore.
+
+### To 6.0.0
+
+This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y.
+
+### To 5.0.0
+
+This major release changes the clustering method from `ip` to `hostname`.
+This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change.
+
+> IMPORTANT: Note that if you upgrade from a previous version you will lose your data.
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq:
+
+```console
+$ kubectl delete statefulset rabbitmq --cascade=false
+```
+
+## Bitnami Kubernetes Documentation
+
+Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/).
You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/NOTES.txt b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/NOTES.txt new file mode 100644 index 00000000..f2f8c0d1 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/NOTES.txt @@ -0,0 +1,163 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.ports.amqp .Values.service.ports.amqpTls -}} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/drycc/scripts/rabbitmq/entrypoint.sh /opt/drycc/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 -d)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 -d)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $servicePort }} at {{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+To access from outside the cluster, perform the following steps:
+
+{{- if .Values.ingress.enabled }}
+{{- if contains "NodePort" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the NodePort IP and ports:
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }})
+    echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the LoadBalancer IP:
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Create a port-forward to the AMQP port:
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }} &
+    echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
+
+{{- end }}
+
+2. Access RabbitMQ using the obtained URL.
+
+To Access the RabbitMQ Management interface:
+
+1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP:
+
+   export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='http-stats')].nodePort}" services {{ include "common.names.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.ports.manager }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.ports.manager }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.manager }}:{{ .Values.service.ports.manager }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.metrics }}:{{ .Values.service.ports.metrics }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.ports.metrics }}/metrics" + +Then, open the obtained URL in a browser. + +{{- if .Values.metrics.serviceMonitor.enabled }} +Deprecated: `metrics.serviceMonitor.enabled` is deprecated. + Please use `metrics.serviceMonitor.{default/perObject/detailed}` to enable metrics scraping on one of the metrics endpoints. 
+ See: https://www.rabbitmq.com/docs/prometheus#default-endpoint +{{- end }} + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} + +{{- end }} +{{- include "common.warnings.resources" (dict "sections" (list "" "volumePermissions") "context" $) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.volumePermissions.image) "context" $) }} +{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.volumePermissions.image) "context" $) }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/_helpers.tpl b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/_helpers.tpl new file mode 100644 index 00000000..6ccaf931 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/_helpers.tpl @@ -0,0 +1,267 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.renderPullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "context" $) }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get RabbitMQ password secret name. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from RabbitMQ secret. +*/}} +{{- define "rabbitmq.secretPasswordKey" -}} + {{- if and .Values.auth.existingPasswordSecret .Values.auth.existingSecretPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.existingSecretPasswordKey $) -}} + {{- else -}} + {{- printf "rabbitmq-password" -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. +*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang cookie key to be retrieved from RabbitMQ secret. +*/}} +{{- define "rabbitmq.secretErlangKey" -}} + {{- if and .Values.auth.existingErlangSecret .Values.auth.existingSecretErlangKey -}} + {{- printf "%s" (tpl .Values.auth.existingSecretErlangKey $) -}} + {{- else -}} + {{- printf "rabbitmq-erlang-cookie" -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "common.names.fullname" .) 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 or base 10 number system. +Input can be: b | B | k | K | m | M | g | G | Ki | Mi | Gi +Or number without suffix (then the number gets interpreted as bytes) +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} + {{- $si := . -}} + {{- if not (typeIs "string" . 
) -}} + {{- $si = int64 $si | toString -}} + {{- end -}} + {{- $bytes := 0 -}} + {{- if or (hasSuffix "B" $si) (hasSuffix "b" $si) -}} + {{- $bytes = $si | trimSuffix "B" | trimSuffix "b" | float64 | floor -}} + {{- else if or (hasSuffix "K" $si) (hasSuffix "k" $si) -}} + {{- $raw := $si | trimSuffix "K" | trimSuffix "k" | float64 -}} + {{- $bytes = mulf $raw (mul 1000) | floor -}} + {{- else if or (hasSuffix "M" $si) (hasSuffix "m" $si) -}} + {{- $raw := $si | trimSuffix "M" | trimSuffix "m" | float64 -}} + {{- $bytes = mulf $raw (mul 1000 1000) | floor -}} + {{- else if or (hasSuffix "G" $si) (hasSuffix "g" $si) -}} + {{- $raw := $si | trimSuffix "G" | trimSuffix "g" | float64 -}} + {{- $bytes = mulf $raw (mul 1000 1000 1000) | floor -}} + {{- else if hasSuffix "Ki" $si -}} + {{- $raw := $si | trimSuffix "Ki" | float64 -}} + {{- $bytes = mulf $raw (mul 1024) | floor -}} + {{- else if hasSuffix "Mi" $si -}} + {{- $raw := $si | trimSuffix "Mi" | float64 -}} + {{- $bytes = mulf $raw (mul 1024 1024) | floor -}} + {{- else if hasSuffix "Gi" $si -}} + {{- $raw := $si | trimSuffix "Gi" | float64 -}} + {{- $bytes = mulf $raw (mul 1024 1024 1024) | floor -}} + {{- else if (mustRegexMatch "^[0-9]+$" $si) -}} + {{- $bytes = $si -}} + {{- else -}} + {{- printf "\n%s is invalid SI quantity\nSuffixes can be: b | B | k | K | m | M | g | G | Ki | Mi | Gi or without any Suffixes" $si | fail -}} + {{- end -}} + {{- $bytes | int64 -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- $userDnPattern := coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} +{{- if or (and (not (gt $serversListLength 0)) (empty .Values.ldap.uri)) (and (not $userDnPattern) (not .Values.ldap.basedn)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers" or "ldap.uri" are mandatory + to configure the connection and "ldap.userDnPattern" or "ldap.basedn" are necessary to lookup the users. Please provide them: + $ helm install {{ .Release.Name }} my-repo/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]=my-ldap-server" \ + --set ldap.port="389" \ + --set ldap.userDnPattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (eq .Values.memoryHighWatermark.type "relative") (not (dig "limits" "memory" "" .Values.resources)) }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below:
+
+    $ helm install {{ .Release.Name }} my-repo/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="relative" \
+      --set memoryHighWatermark.value="0.4" \
+      --set resources.limits.memory="2Gi"
+
+    Alternatively, use an absolute value for the memory high watermark:
+
+    $ helm install {{ .Release.Name }} my-repo/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="absolute" \
+      --set memoryHighWatermark.value="512MB"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - TLS configuration for Ingress
+*/}}
+{{- define "rabbitmq.validateValues.ingress.tls" -}}
+{{- if and .Values.ingress.enabled .Values.ingress.tls (not (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations ))) (not .Values.ingress.selfSigned) (not .Values.ingress.existingSecret) (empty .Values.ingress.extraTls) }}
+rabbitmq: ingress.tls
+  You enabled the TLS configuration for the default ingress hostname but
+  you did not enable any of the available mechanisms to create the TLS secret
+  to be used by the Ingress Controller.
+  Please use any of these alternatives:
+    - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates.
+    - Use the `ingress.existingSecret` to provide your custom TLS certificates. 
+ - Rely on cert-manager to create it by setting the corresponding annotations + - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. + Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts volume name. +*/}} +{{- define "rabbitmq.initScripts" -}} +{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Get the extraConfigurationExistingSecret secret. +*/}} +{{- define "rabbitmq.extraConfiguration" -}} +{{- if not (empty .Values.extraConfigurationExistingSecret) -}} + {{- include "common.secrets.lookup" (dict "secret" .Values.extraConfigurationExistingSecret "key" "extraConfiguration" "context" $) | b64dec -}} +{{- else -}} + {{- tpl .Values.extraConfiguration . 
-}} +{{- end -}} +{{- end -}} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/config-secret.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/config-secret.yaml new file mode 100644 index 00000000..8bc1de6a --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/config-secret.yaml @@ -0,0 +1,21 @@ +{{- if or (empty .Values.configurationExistingSecret) .Values.advancedConfiguration }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-config" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if empty .Values.configurationExistingSecret }} + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | b64enc | nindent 4 }} + {{- end }} + {{- if .Values.advancedConfiguration }} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | b64enc | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/extra-list.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/extra-list.yaml new file mode 100644 index 00000000..9ac65f9e --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress-tls-secrets.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress-tls-secrets.yaml new file mode 100644 index 00000000..cce02e05 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress-tls-secrets.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned (not .Values.ingress.existingSecret) }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress.yaml new file mode 100644 index 00000000..ab3c73ba --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/ingress.yaml @@ -0,0 +1,54 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" .Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" $.Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned .Values.ingress.existingSecret)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations 
)) .Values.ingress.selfSigned .Values.ingress.existingSecret) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ default (printf "%s-tls" .Values.ingress.hostname | trunc 63 | trimSuffix "-") .Values.ingress.existingSecret }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/init-configmap.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/init-configmap.yaml new file mode 100644 index 00000000..0353d567 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/init-configmap.yaml @@ -0,0 +1,13 @@ +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" ( dict "value" .Values.initScripts "context" $ ) | nindent 4 }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/networkpolicy.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/networkpolicy.yaml new file mode 100644 index 00000000..6eff81e5 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/networkpolicy.yaml @@ -0,0 +1,105 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + {{- if .Values.rbac.create }} + # Allow access to kube-apiserver + {{- range $port := .Values.networkPolicy.kubeAPIServerPorts }} + - port: {{ $port }} + {{- end }} + {{- end }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.service.ports.epmd }} + - port: {{ .Values.service.ports.amqp }} + - port: {{ .Values.service.ports.amqpTls }} + - port: {{ .Values.service.ports.dist }} + - port: {{ .Values.service.ports.manager }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + {{- if eq .Values.service.type "LoadBalancer" }} + - {} + {{- else }} + # Allow inbound connections to RabbitMQ + - ports: + - port: {{ .Values.containerPorts.epmd }} + - port: {{ .Values.containerPorts.amqp }} + - port: {{ .Values.containerPorts.amqpTls }} + - port: {{ .Values.containerPorts.dist }} + - port: {{ .Values.containerPorts.manager }} + {{- if .Values.metrics.enabled 
}} + - port: {{ .Values.containerPorts.metrics }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.addExternalClientAccess }} + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- end }} + {{- if .Values.networkPolicy.ingressPodMatchLabels }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSMatchLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git 
a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/pdb.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/pdb.yaml new file mode 100644 index 00000000..fd0a8c02 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/pdb.yaml @@ -0,0 +1,21 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if or .Values.pdb.maxUnavailable (not .Values.pdb.minAvailable) }} + maxUnavailable: {{ .Values.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/prometheusrule.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/prometheusrule.yaml new file mode 100644 index 00000000..ba6993d2 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/prometheusrule.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) 
.Values.metrics.prometheusRule.namespace | quote}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "common.names.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/role.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/role.yaml new file mode 100644 index 00000000..c8b5e6af --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/rolebinding.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/rolebinding.yaml new file mode 100644 index 00000000..43e2e4f1 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) 
}} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/secrets.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/secrets.yaml new file mode 100644 index 00000000..3b35f23f --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/secrets.yaml @@ -0,0 +1,59 @@ +{{- $host := printf "%s.%s.svc.%s" (include "common.names.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain }} +{{- $port := print .Values.service.ports.amqp }} +{{- $user := print .Values.auth.username }} +{{- $password := include "common.secrets.passwords.manage" (dict "secret" (include "rabbitmq.secretPasswordName" .) "key" (include "rabbitmq.secretPasswordKey" .) "length" 16 "providedValues" (list "auth.password") "skipB64enc" true "skipQuote" true "honorProvidedValues" true "context" $) }} +{{- $erlangCookie := include "common.secrets.passwords.manage" (dict "secret" (include "rabbitmq.secretErlangName" .) "key" (include "rabbitmq.secretErlangKey" .) "length" 32 "failOnNew" false "providedValues" (list "auth.erlangCookie") "honorProvidedValues" true "context" $) }} +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (not .Values.auth.existingPasswordSecret) }} + rabbitmq-password: {{ print $password | b64enc | quote }} + {{- end }} + {{- if (not .Values.auth.existingErlangSecret ) }} + rabbitmq-erlang-cookie: {{ print $erlangCookie }} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ ternary (printf "%s-%s" $.Release.Name $key) $key $.Values.extraSecretsPrependReleaseName }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} +{{- if .Values.serviceBindings.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . 
}}-svcbind + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: servicebinding.io/rabbitmq +data: + provider: {{ print "drycc" | b64enc | quote }} + type: {{ print "rabbitmq" | b64enc | quote }} + host: {{ print $host | b64enc | quote }} + port: {{ print $port | b64enc | quote }} + username: {{ print $user | b64enc | quote }} + password: {{ print $password | b64enc | quote }} + uri: {{ printf "amqp://%s:%s@%s:%s" $user $password $host $port | b64enc | quote }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/serviceaccount.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/serviceaccount.yaml new file mode 100644 index 00000000..85770a18 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +secrets: + - name: {{ template "rabbitmq.secretPasswordName" . 
}} +{{- end }} + diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/servicemonitor.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/servicemonitor.yaml new file mode 100644 index 00000000..e1b57721 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/servicemonitor.yaml @@ -0,0 +1,134 @@ +{{- /* `.Values.metrics.serviceMonitor.enabled` is deprecated, please use default/perObject/detailed */}} +{{- if and + .Values.metrics.enabled + ( or + .Values.metrics.serviceMonitor.enabled + .Values.metrics.serviceMonitor.default.enabled + .Values.metrics.serviceMonitor.perObject.enabled + .Values.metrics.serviceMonitor.detailed.enabled + ) +}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} + endpoints: + {{- /* deprecated, please use default/perObject/detailed */}} + {{- if .Values.metrics.serviceMonitor.enabled }} + - port: metrics + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.params }} + params: {{ toYaml .Values.metrics.serviceMonitor.params | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.default.enabled }} + - path: /metrics + port: metrics + {{- if .Values.metrics.serviceMonitor.default.interval }} + interval: {{ .Values.metrics.serviceMonitor.default.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.default.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.default.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.default.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.default.honorLabels }} + {{- end }} + {{- if 
.Values.metrics.serviceMonitor.default.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.default.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.default.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.default.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.perObject.enabled }} + - path: /metrics/per-object + port: metrics + {{- if .Values.metrics.serviceMonitor.perObject.interval }} + interval: {{ .Values.metrics.serviceMonitor.perObject.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.perObject.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.perObject.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.perObject.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.perObject.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.perObject.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.perObject.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.perObject.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.perObject.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.detailed.enabled }} + - path: /metrics/detailed + port: metrics + params: + {{- if .Values.metrics.serviceMonitor.detailed.family }} + family: + {{- range .Values.metrics.serviceMonitor.detailed.family }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.detailed.vhost }} + vhost: + {{- range .Values.metrics.serviceMonitor.detailed.vhost }} + - {{ . 
| quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.detailed.interval }} + interval: {{ .Values.metrics.serviceMonitor.detailed.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.detailed.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.detailed.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.detailed.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.detailed.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.detailed.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.detailed.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.detailed.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.detailed.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} + {{- if .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.podTargetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.targetLabels "context" $) | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/statefulset.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/statefulset.yaml new file mode 100644 index 00000000..d35291ca --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/statefulset.yaml @@ -0,0 +1,524 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.statefulsetLabels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.statefulsetAnnotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.statefulsetAnnotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ printf "%s-%s" (include "common.names.fullname" .) 
(default "headless" .Values.servicenameOverride) }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/config-secret.yaml") . | sha256sum }} + {{- if (include "rabbitmq.createTlsSecret" . ) }} + checksum/configTLS: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.initScripts }} + checksum/initScripts: {{ include (print $.Template.BasePath "/init-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dnsConfig "context" .) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" + {{- end }} + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext 
"runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.volumePermissions.resources "context" $) | nindent 12 }} + {{- else if ne .Values.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.volumePermissions.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- end }} + - name: prepare-plugins-dir + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.resources "context" $) | nindent 12 }} + {{- else if ne .Values.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + #!/bin/bash + + . 
/opt/drycc/scripts/liblog.sh + + info "Copying plugins dir to empty dir" + # In order to not break the possibility of installing custom plugins, we need + # to make the plugins directory writable, so we need to copy it to an empty dir volume + cp -r --preserve=mode /opt/drycc/rabbitmq/plugins/ /emptydir/app-plugins-dir + volumeMounts: + - name: empty-dir + mountPath: /emptydir + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/drycc/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/drycc/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl 
stop_app + fi + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- $svcName := printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.servicenameOverride) }} + {{- if .Values.featureFlags }} + - name: RABBITMQ_FEATURE_FLAGS + value: {{ .Values.featureFlags }} + {{- end }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).{{ $svcName }}.$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_UPDATE_PASSWORD + value: {{ ternary "yes" "no" .Values.auth.updatePassword | quote }} + - name: RABBITMQ_MNESIA_DIR + value: "{{ .Values.persistence.mountPath }}/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + {{- if .Values.logs }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + {{- end }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString 
.Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . }} + key: {{ template "rabbitmq.secretErlangKey" . }} + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + - name: RABBITMQ_CLUSTER_REBALANCE + value: "true" + {{- end }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: {{ ternary "yes" "no" .Values.loadDefinition.enabled | quote }} + - name: RABBITMQ_DEFINITIONS_FILE + value: {{ .Values.loadDefinition.file | quote }} + - name: RABBITMQ_SECURE_PASSWORD + value: {{ ternary "yes" "no" (or .Values.auth.securePassword (not .Values.auth.password)) | quote }} + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: {{ template "rabbitmq.secretPasswordKey" . }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . 
| quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: amqp + containerPort: {{ .Values.containerPorts.amqp }} + {{- if .Values.hostPorts.amqp }} + hostPort: {{ .Values.hostPorts.amqp }} + {{- end }} + - name: dist + containerPort: {{ .Values.containerPorts.dist }} + - name: stats + containerPort: {{ .Values.containerPorts.manager }} + {{- if .Values.hostPorts.manager }} + hostPort: {{ .Values.hostPorts.manager }} + {{- end }} + - name: epmd + containerPort: {{ .Values.containerPorts.epmd }} + - name: metrics + containerPort: {{ .Values.containerPorts.metrics }} + {{- if .Values.hostPorts.metrics }} + hostPort: {{ .Values.hostPorts.metrics }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-tls + containerPort: {{ .Values.containerPorts.amqpTls }} + {{- if .Values.hostPorts.amqpTls }} + hostPort: {{ .Values.hostPorts.amqpTls }} + {{- end }} + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit 
.Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -ec + {{- if or (.Values.loadDefinition.enabled) (not (contains "rabbitmq_management" .Values.plugins )) }} + - rabbitmq-diagnostics -q ping + {{- else }} + - curl -f --user {{ .Values.auth.username }}:$RABBITMQ_PASSWORD 127.0.0.1:{{ .Values.containerPorts.manager }}/api/health/checks/virtual-hosts + {{- end }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -ec + {{- if or (.Values.loadDefinition.enabled) (not (contains "rabbitmq_management" .Values.plugins )) }} + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + {{- else }} + - curl -f --user {{ .Values.auth.username }}:$RABBITMQ_PASSWORD 127.0.0.1:{{ .Values.containerPorts.manager }}/api/health/checks/local-alarms + {{- end }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: {{ternary "amqp-tls" "amqp" .Values.auth.tls.enabled }} + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.resources "context" $) | nindent 12 }} + {{- else if ne .Values.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: configuration + mountPath: 
/drycc/rabbitmq/conf + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/rabbitmq/etc/rabbitmq + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/drycc/rabbitmq/var/lib/rabbitmq + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/rabbitmq/.rabbitmq/ + subPath: app-erlang-cookie + - name: empty-dir + mountPath: /opt/drycc/rabbitmq/var/log/rabbitmq + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/rabbitmq/plugins + subPath: app-plugins-dir + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/drycc/rabbitmq/certs + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + mountPath: {{ .Values.ldap.tls.certificatesMountPath }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if .Values.auth.tls.enabled }} + - name: certs + projected: + sources: + - secret: + name: {{ template "rabbitmq.tlsSecretName" . 
}} + items: + {{- if not .Values.auth.tls.overrideCaCertificate }} + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + {{- end }} + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- if .Values.auth.tls.overrideCaCertificate }} + - secret: + name: {{ .Values.auth.tls.overrideCaCertificate }} + items: + - key: ca.crt + path: ca_certificate.pem + {{- end }} + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + secret: + secretName: {{ .Values.ldap.tls.certificatesSecret }} + {{- end }} + - name: configuration + projected: + sources: + {{- if or (and (empty .Values.configurationExistingSecret) .Values.configuration) (and (not .Values.advancedConfigurationExistingSecret) .Values.advancedConfiguration) }} + - secret: + name: {{ printf "%s-config" (include "common.names.fullname" .) }} + {{- end }} + {{- if and .Values.advancedConfigurationExistingSecret (not .Values.advancedConfiguration) }} + - secret: + name: {{ tpl .Values.advancedConfigurationExistingSecret . | quote }} + {{- end }} + {{- if not (empty .Values.configurationExistingSecret) }} + - secret: + name: {{ tpl .Values.configurationExistingSecret . | quote }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "rabbitmq.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ tpl .Values.initScriptsCM . | quote }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.initScriptsSecret . 
| quote }} + defaultMode: 0755 + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + {{- if .Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.persistence.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc-headless.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc-headless.yaml new file mode 100644 index 00000000..e7e79f3a --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.servicenameOverride) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.service.annotationsHeadless .Values.commonAnnotations .Values.service.headless.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.service.annotationsHeadless .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ .Values.service.ports.amqp }} + targetPort: amqp + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- end }} + {{- if .Values.service.extraPortsHeadless }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPortsHeadless "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + publishNotReadyAddresses: true diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc.yaml new file mode 100644 index 00000000..e82571c9 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/svc.yaml @@ -0,0 +1,106 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerClass)) }} + loadBalancerClass: {{ .Values.service.loadBalancerClass }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + allocateLoadBalancerNodePorts: {{ .Values.service.allocateLoadBalancerNodePorts }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ 
.Values.service.ports.amqp }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqp)) }} + nodePort: {{ .Values.service.nodePorts.amqp }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-tls + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqpTls)) }} + nodePort: {{ .Values.service.nodePorts.amqpTls }} + {{- end }} + {{- end }} + {{- if .Values.service.epmdPortEnabled }} + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.epmd)) }} + nodePort: {{ .Values.service.nodePorts.epmd }} + {{- end }} + {{- end }} + {{- if .Values.service.distPortEnabled }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.dist)) }} + nodePort: {{ .Values.service.nodePorts.dist }} + {{- end }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) 
(not (empty .Values.service.nodePorts.manager)) }} + nodePort: {{ .Values.service.nodePorts.manager }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.portNames.metrics }} + port: {{ .Values.service.ports.metrics }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }} + nodePort: {{ .Values.service.nodePorts.metrics }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/tls-secrets.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/tls-secrets.yaml new file mode 100644 index 00000000..21deee5a --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/tls-secrets.yaml @@ -0,0 +1,31 @@ +{{- if (include "rabbitmq.createTlsSecret" . ) }} +{{- $secretName := printf "%s-certs" (include "common.names.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if not .Values.auth.tls.autoGenerated }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" 
.Values.auth.tls.serverCertificate | b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 36500 }} + {{- $fullname := include "common.names.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "common.names.fullname" . }} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $cert := genSignedCert $fullname nil $altNames 36500 $ca }} + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} + {{- end }} +{{- end }} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/validation.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/validation.yaml new file mode 100644 index 00000000..f72ef7f7 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/templates/validation.yaml @@ -0,0 +1,2 @@ +{{- include "rabbitmq.validateValues" . 
}} + diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.schema.json b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.schema.json new file mode 100644 index 00000000..8ef33eff --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": 
"boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.yaml b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.yaml new file mode 100644 index 00000000..a8802379 --- /dev/null +++ b/addons/rabbitmq/4.0/chart/rabbitmq-4.0/values.yaml @@ -0,0 +1,1633 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section RabbitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry [default: REGISTRY_NAME] RabbitMQ image registry +## @param image.repository [default: REPOSITORY_NAME/rabbitmq] RabbitMQ image repository +## @skip image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.digest RabbitMQ image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: registry.drycc.cc + repository: drycc-addons/rabbitmq + tag: "4.0" + digest: "" + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] +## @section Common parameters +## + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param servicenameOverride String to partially override headless service name +## +servicenameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false +## @param enableServiceLinks Whether information about services should be injected into pod's environment variable +## The environment variables injected by service links are not used, but can lead to slow boot times or slow running of the scripts when there are many services in the current namespace. +## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. 
+## +enableServiceLinks: true +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity +## @param automountServiceAccountToken Mount Service Account token in pod +## +automountServiceAccountToken: true +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param dnsPolicy DNS Policy for pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsPolicy: ClusterFirst +## +dnsPolicy: "" +## @param dnsConfig DNS Configuration pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsConfig: +## options: +## - name: ndots +## value: "4" +## +dnsConfig: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + username: rabbitmq + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + password: "" + ## @param auth.securePassword Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. 
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + securePassword: true + ## @param auth.updatePassword Update RabbitMQ password on secret change + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + updatePassword: false + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (existing secret must contain a value for `rabbitmq-password` key or override with setting auth.existingSecretPasswordKey) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + ## @param auth.existingSecretPasswordKey [default: rabbitmq-password] Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.enableLoopbackUser If enabled, the user `auth.username` can only connect from localhost + ## + enableLoopbackUser: false + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + erlangCookie: "" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key or override with auth.existingSecretErlangKey) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + ## @param auth.existingSecretErlangKey [default: rabbitmq-erlang-cookie] Erlang cookie key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingErlangSecret` parameter is set + ## + existingSecretErlangKey: "" + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param 
auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.sslOptionsPassword.enabled Enable usage of password for private Key + ## @param auth.tls.sslOptionsPassword.existingSecret Name of existing Secret containing the sslOptionsPassword + ## @param auth.tls.sslOptionsPassword.key Enable Key referring to sslOptionsPassword in Secret specified in auth.tls.sslOptionsPassword.existingSecret + ## @param auth.tls.sslOptionsPassword.password Use this string as Password. If set, auth.tls.sslOptionsPassword.existingSecret and auth.tls.sslOptionsPassword.key are ignored + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## @param auth.tls.overrideCaCertificate Existing secret with certificate content be mounted instead of the `ca.crt` coming from caCertificate or existingSecret/existingSecretFullChain. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + sslOptionsPassword: + enabled: false + existingSecret: "" + key: "" + password: "" + caCertificate: "" + serverCertificate: "" + serverKey: "" + existingSecret: "" + existingSecretFullChain: false + overrideCaCertificate: "" +## @param logs Path of the RabbitMQ server's Erlang log file. 
Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65535" +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. +## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. 
+ ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256Mi + ## + value: 0.4 +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" + +## @param queue_master_locator Changes the queue_master_locator setting in the rabbitmq config file +## +queue_master_locator: min-masters + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. +## +communityPlugins: "" +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap" +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: true + ## @param clustering.name RabbitMQ cluster name + ## If not set, a name is generated using the common.names.fullname template + ## + name: "" + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). 
+ ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + ## @param clustering.partitionHandling Switch Partition Handling Strategy. Either `autoheal` or `pause_minority` or `pause_if_all_down` or `ignore` + ## ref: https://www.rabbitmq.com/partitions.html#automatic-handling + ## + partitionHandling: autoheal +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.file Name of the definitions file + ## + file: "/app/load_definition.json" + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +## +args: [] +## @param lifecycleHooks Overwrite lifecycle for the RabbitMQ container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
+## +terminationGracePeriodSeconds: 120 +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## Container Ports +## @param containerPorts.amqp +## @param containerPorts.amqpTls +## @param containerPorts.dist +## @param containerPorts.manager +## @param containerPorts.epmd +## @param containerPorts.metrics +## +containerPorts: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + epmd: 4369 + metrics: 9419 +## Host Ports +## @param hostPorts.amqp +## @param hostPorts.amqpTls +## @param hostPorts.manager +## @param hostPorts.metrics +## +hostPorts: + amqp: "" + amqpTls: "" + manager: "" + metrics: "" + +## @param initScripts Dictionary of init scripts. Evaluated as a template. +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +## +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. 
+## +initScriptsSecret: "" +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## + +## RabbitMQ tcp_listen_options parameters +## See : https://www.rabbitmq.com/networking.html for additional information +## +tcpListenOptions: + ## @param tcpListenOptions.enabled Enable TCP listen options of RabbitMQ + ## + enabled: true + ## @param tcpListenOptions.backlog Maximum size of the unaccepted TCP connections queue + ## + backlog: 128 + ## @param tcpListenOptions.nodelay When set to true, deactivates Nagle's algorithm. Default is true. Highly recommended for most users. + ## + nodelay: true + ## tcpListenOptions.linger + ## + linger: + ## @param tcpListenOptions.linger.lingerOn Enable Server socket lingering + ## + lingerOn: true + ## @param tcpListenOptions.linger.timeout Server Socket lingering timeout + ## + timeout: 0 + ## @param tcpListenOptions.keepalive When set to true, enables TCP keepalives + ## + keepalive: false +configuration: |- + ## Username and password + default_user = {{ .Values.auth.username }} + {{- if and (not .Values.auth.securePassword) .Values.auth.password }} + default_pass = {{ .Values.auth.password }} + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_name = {{ default (include "common.names.fullname" .) .Values.clustering.name }} + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default + cluster_formation.k8s.address_type = {{ .Values.clustering.addressType }} + {{- $svcName := printf "%s-%s" (include "common.names.fullname" .) 
(default "headless" .Values.servicenameOverride) }} + cluster_formation.k8s.service_name = {{ $svcName }} + cluster_formation.k8s.hostname_suffix = .{{ $svcName }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = {{ .Values.clustering.partitionHandling }} + {{- end }} + {{ if and .Values.clustering.enabled .Values.loadDefinition.enabled }} + cluster_formation.target_cluster_size_hint = {{ .Values.replicaCount }} + {{ end }} + {{- if .Values.loadDefinition.enabled }} + load_definitions = {{ .Values.loadDefinition.file }} + {{- end }} + # queue master locator + queue_master_locator = {{ .Values.queue_master_locator }} + # enable loopback user + {{- if not (empty .Values.auth.username) }} + loopback_users.{{ .Values.auth.username }} = {{ .Values.auth.enableLoopbackUser }} + {{- else}} + loopback_users.guest = {{ .Values.auth.enableLoopbackUser }} + {{- end }} + {{ template "rabbitmq.extraConfiguration" . 
}} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.ports.amqpTls }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/drycc/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/drycc/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/drycc/rabbitmq/certs/server_key.pem + {{- if .Values.auth.tls.sslOptionsPassword.enabled }} + ssl_options.password = {{ include "common.secrets.passwords.manage" (dict "secret" .Values.auth.tls.sslOptionsPassword.existingSecret "key" .Values.auth.tls.sslOptionsPassword.key "providedValues" (list "auth.tls.sslOptionsPassword.password") "skipB64enc" true "failOnNew" false "context" $) }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1.authn = ldap + auth_backends.1.authz = {{ ternary "ldap" "internal" .Values.ldap.authorisationEnabled }} + auth_backends.2 = internal + {{- $host := list }} + {{- $port := ternary 636 389 .Values.ldap.tls.enabled }} + {{- if .Values.ldap.uri }} + {{- $hostPort := get (urlParse .Values.ldap.uri) "host" }} + {{- $host = list (index (splitList ":" $hostPort) 0) -}} + {{- if (contains ":" $hostPort) }} + {{- $port = index (splitList ":" $hostPort) 1 -}} + {{- end }} + {{- end }} + {{- range $index, $server := concat $host .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ coalesce .Values.ldap.port $port }} + {{- if or .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + auth_ldap.user_dn_pattern = {{ coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + {{- end }} + {{- if .Values.ldap.basedn }} + auth_ldap.dn_lookup_base = {{ .Values.ldap.basedn }} + {{- end }} + {{- if .Values.ldap.uidField }} + auth_ldap.dn_lookup_attribute = {{ .Values.ldap.uidField }} + {{- end }} + {{- if .Values.ldap.binddn }} + 
auth_ldap.dn_lookup_bind.user_dn = {{ .Values.ldap.binddn }} + auth_ldap.dn_lookup_bind.password = {{ required "'ldap.bindpw' is required when 'ldap.binddn' is defined" .Values.ldap.bindpw }} + {{- end }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = {{ not .Values.ldap.tls.startTls }} + auth_ldap.use_starttls = {{ .Values.ldap.tls.startTls }} + {{- if .Values.ldap.tls.CAFilename }} + auth_ldap.ssl_options.cacertfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.CAFilename }} + {{- end }} + {{- if .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.certfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.keyfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ required "'ldap.tls.certKeyFilename' is required when 'ldap.tls.certFilename' is defined" .Values.ldap.tls.certKeyFilename }} + {{- end }} + {{- if .Values.ldap.tls.skipVerify }} + auth_ldap.ssl_options.verify = verify_none + auth_ldap.ssl_options.fail_if_no_peer_cert = false + {{- else if .Values.ldap.tls.verify }} + auth_ldap.ssl_options.verify = {{ .Values.ldap.tls.verify }} + {{- end }} + {{- end }} + {{- end }} + ## Prometheus metrics + ## + prometheus.tcp.port = {{ .Values.containerPorts.metrics }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + {{- if (dig "limits" "memory" "" .Values.resources) }} + total_memory_available_override_value = {{ include "rabbitmq.toBytes" (dig "limits" "memory" "" .Values.resources) }} + {{- end }} + {{- if (eq .Values.memoryHighWatermark.type "absolute") }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ include "rabbitmq.toBytes" .Values.memoryHighWatermark.value }} + {{- else if (eq .Values.memoryHighWatermark.type "relative") }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + {{- end }} + {{- if .Values.tcpListenOptions.enabled }} + ## TCP Listen 
Options + ## + tcp_listen_options.backlog = {{ .Values.tcpListenOptions.backlog }} + tcp_listen_options.nodelay = {{ .Values.tcpListenOptions.nodelay }} + tcp_listen_options.linger.on = {{ .Values.tcpListenOptions.linger.lingerOn }} + tcp_listen_options.linger.timeout = {{ .Values.tcpListenOptions.linger.timeout }} + tcp_listen_options.keepalive = {{ .Values.tcpListenOptions.keepalive }} + {{- end }} +## @param configurationExistingSecret Existing secret with the configuration to use as rabbitmq.conf. +## Must contain the key "rabbitmq.conf" +## Takes precedence over `configuration`, so do not use both simultaneously +## With providing an existingSecret, extraConfiguration and extraConfigurationExistingSecret do not take any effect +## +configurationExistingSecret: "" +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## Do not use simultaneously with `extraConfigurationExistingSecret` +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB +## @param extraConfigurationExistingSecret Existing secret with the extra configuration to append to `configuration`. +## Must contain the key "extraConfiguration" +## Takes precedence over `extraConfiguration`, so do not use both simultaneously +## +extraConfigurationExistingSecret: "" +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## LDAP authorisation example: +## advancedConfiguration: |- +## [{rabbitmq_auth_backend_ldap,[ +## {tag_queries, [{administrator, {constant, true}}, +## {management, {constant, true}}]} +## ]}]. +## +## If both, advancedConfiguration and advancedConfigurationExistingSecret are set, then advancedConfiguration +## will be used instead of the secret. 
+## +advancedConfiguration: "" +## @param advancedConfigurationExistingSecret Existing secret with the advanced configuration file (must contain a key `advanced.config`). +## Use this as additional configuration in classic config format (Erlang term configuration format) as in advancedConfiguration +## Do not use in combination with advancedConfiguration, will be ignored +## +advancedConfigurationExistingSecret: "" +## This subsystem was introduced in RabbitMQ 3.8.0 to allow rolling upgrades of cluster members without shutting down the entire cluster. +## Feature flags are a mechanism that controls what features are considered to be enabled or available on all cluster nodes. If a feature flag is enabled, so is its associated feature (or behavior). If not then all nodes in the cluster will disable the feature (behavior). +## e.g., drop_unroutable_metric,empty_basic_get_metric,implicit_default_bindings,maintenance_mode_status,quorum_queue,virtual_host_metadata +## @param featureFlags Feature flags that control what features are considered to be enabled or available on all cluster nodes. +## +featureFlags: "" +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.uri LDAP connection string. + ## + uri: "" + ## @param ldap.servers List of LDAP servers hostnames. This is valid only if ldap.uri is not set + ## + servers: [] + ## @param ldap.port LDAP servers port. This is valid only if ldap.uri is not set + ## + port: "" + ## DEPRECATED ldap.user_dn_pattern will be removed in the future, please use userDnPattern instead + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.userDnPattern Pattern used to translate the provided username into a value to be used for the LDAP bind. + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + userDnPattern: "" + ## @param ldap.binddn DN of the account used to search in the LDAP server. 
+ ## + binddn: "" + ## @param ldap.bindpw Password for binddn account. + ## + bindpw: "" + ## @param ldap.basedn Base DN path where binddn account will search for the users. + ## + basedn: "" + ## @param ldap.uidField Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + uidField: "" + ## @param ldap.authorisationEnabled Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings + ## ref: https://www.rabbitmq.com/ldap.html#authorisation + ## + authorisationEnabled: false + ## @param ldap.tls.enabled Enable TLS configuration. + ## @param ldap.tls.startTls Use STARTTLS instead of LDAPS. + ## @param ldap.tls.skipVerify Skip any SSL verification (hostnames or certificates) + ## @param ldap.tls.verify Verify connection. Valid values are 'verify_peer' or 'verify_none' + ## @param ldap.tls.certificatesMountPath Where LDAP certificates are mounted. + ## @param ldap.tls.certificatesSecret Secret with LDAP certificates. + ## @param ldap.tls.CAFilename CA certificate filename. Should match with the CA entry key in the ldap.tls.certificatesSecret. + ## @param ldap.tls.certFilename Client certificate filename to authenticate against the LDAP server. Should match with the certificate entry key in the ldap.tls.certificatesSecret. + ## @param ldap.tls.certKeyFilename Client Key filename to authenticate against the LDAP server. Should match with the certificate entry key in the ldap.tls.certificatesSecret. 
+ ## + tls: + enabled: false + startTls: false + skipVerify: false + verify: "verify_peer" + certificatesMountPath: /opt/drycc/rabbitmq/ldap/certs + certificatesSecret: "" + CAFilename: "" + certFilename: "" + certKeyFilename: "" +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes . +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with prepended. +## +extraSecretsPrependReleaseName: false +## @section Statefulset parameters +## + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. 
+## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param updateStrategy.type Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate +## @param statefulsetLabels RabbitMQ statefulset labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} +## @param statefulsetAnnotations RabbitMQ statefulset annotations. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +statefulsetAnnotations: {} +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy +## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface +## @param podSecurityContext.supplementalGroups Set filesystem extra groups +## @param podSecurityContext.fsGroup Set RabbitMQ pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 +## @param containerSecurityContext.enabled Enabled RabbitMQ containers' Security Context +## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container +## @param containerSecurityContext.runAsUser Set RabbitMQ containers' Security Context runAsUser +## @param containerSecurityContext.runAsGroup Set RabbitMQ containers' Security Context runAsGroup +## @param containerSecurityContext.runAsNonRoot Set RabbitMQ container's Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Set container's privilege escalation +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.capabilities.drop Set container's Security Context runAsNonRoot +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## 
readOnlyRootFilesystem: true +## +containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
+## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 +## +resourcesPreset: "micro" +## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) +## Example: +## resources: +## requests: +## cpu: 2 +## memory: 512Mi +## limits: +## cpu: 3 +## memory: 1024Mi +## +resources: {} +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 +## Configure RabbitMQ 
containers' extra options for startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: true + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: "" + ## @param 
pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. + ## + maxUnavailable: "" +## @section RBAC parameters +## + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Auto-mount the service account token in the pod + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. + ## + annotations: {} +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + create: true + ## @param rbac.rules Custom RBAC rules + ## Example: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## @section Persistence parameters +## +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaim + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "" + ## @param persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom RabbitMQ images + ## + mountPath: /opt/drycc/rabbitmq/.rabbitmq/mnesia + ## @param persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 8Gi + ## @param persistence.annotations Persistence annotations. Evaluated as a template + ## Example: + ## annotations: + ## example.io/disk-volume-type: SSD + ## + annotations: {} + ## @param persistence.labels Persistence labels. 
Evaluated as a template + ## Example: + ## labels: + ## app: my-app + labels: {} +## Persistent Volume Claim Retention Policy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention +## +persistentVolumeClaimRetentionPolicy: + ## @param persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for rabbitmq Statefulset + ## + enabled: true + ## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Delete + +## @section Exposure parameters +## + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ ## + portEnabled: true + ## @param service.distPortEnabled Erlang distribution server port + ## + distPortEnabled: true + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + managerPortEnabled: true + ## @param service.epmdPortEnabled RabbitMQ EPMD Discovery service port + ## + epmdPortEnabled: true + ## Service ports + ## @param service.ports.amqp Amqp service port + ## @param service.ports.amqpTls Amqp TLS service port + ## @param service.ports.dist Erlang distribution service port + ## @param service.ports.manager RabbitMQ Manager service port + ## @param service.ports.metrics RabbitMQ Prometheus metrics service port + ## @param service.ports.epmd EPMD Discovery service port + ## + ports: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + metrics: 9419 + epmd: 4369 + ## Service ports name + ## @param service.portNames.amqp Amqp service port name + ## @param service.portNames.amqpTls Amqp TLS service port name + ## @param service.portNames.dist Erlang distribution service port name + ## @param service.portNames.manager RabbitMQ Manager service port name + ## @param service.portNames.metrics RabbitMQ Prometheus metrics service port name + ## @param service.portNames.epmd EPMD Discovery service port name + ## + portNames: + amqp: "amqp" + amqpTls: "amqp-tls" + dist: "dist" + manager: "http-stats" + metrics: "metrics" + epmd: "epmd" + ## Node ports to expose + ## @param service.nodePorts.amqp Node port for Amqp + ## @param service.nodePorts.amqpTls Node port for Amqp TLS + ## @param service.nodePorts.dist Node port for Erlang distribution + ## @param service.nodePorts.manager Node port for RabbitMQ Manager + ## @param service.nodePorts.epmd Node port for EPMD Discovery + ## @param service.nodePorts.metrics Node port for RabbitMQ Prometheus metrics + ## + nodePorts: + amqp: "" + amqpTls: "" + dist: "" + manager: "" + epmd: "" + metrics: "" + 
## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: [] + ## @param service.extraPortsHeadless Extra ports to expose in the headless service + ## E.g.: + ## extraPortsHeadless: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPortsHeadless: [] + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.allocateLoadBalancerNodePorts Whether to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.loadBalancerClass Set the LoadBalancerClass + ## + loadBalancerClass: "" + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + ## @param service.clusterIP Kubernetes service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + ## @param service.annotations Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## DEPRECATED service.annotationsHeadless it will be removed in a future release, please use service.headless.annotations instead + ## @param service.annotationsHeadless Headless Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraRules The list of additional rules to be added to this ingress record. 
Evaluated as a template + ## Useful when looking for additional customization, such as using different backend + ## + extraRules: [] + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.existingSecret Name of an existing secret that contains your own TLS certificate for this ingress. 
+ ## + existingSecret: "" +## Network Policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) + ## + kubeAPIServerPorts: [443, 6443, 8443] + ## @param networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + + allowCurrentNamespace: true + allowNamespaces: + - a + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param networkPolicy.addExternalClientAccess Allow access from pods with client label set to "true". Ignored if `networkPolicy.allowExternal` is true. 
+ ## + addExternalClientAccess: true + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressPodMatchLabels [object] Labels to match to allow traffic from other pods. Ignored if `networkPolicy.allowExternal` is true. + ## e.g: + ## ingressPodMatchLabels: + ## my-client: "true" + ## + ingressPodMatchLabels: {} + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. 
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## @section Metrics Parameters +## + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.ports.metrics }}" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor + ## + annotations: {} + ## Scrape metrics from the `/metrics` endpoint + ## ref: https://www.rabbitmq.com/docs/prometheus#default-endpoint + ## + default: + ## @param metrics.serviceMonitor.default.enabled Enable default metrics endpoint (`GET /metrics`) to be scraped by the ServiceMonitor + ## + enabled: false + ## @param metrics.serviceMonitor.default.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.default.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.default.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [ ] + ## @param metrics.serviceMonitor.default.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. 
+ ## + metricRelabelings: [ ] + ## @param metrics.serviceMonitor.default.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## Scrape metrics from the `/metrics/per-object` endpoint + ## ref: https://www.rabbitmq.com/docs/prometheus#per-object-endpoint + ## + perObject: + ## @param metrics.serviceMonitor.perObject.enabled Enable per-object metrics endpoint (`GET /metrics/per-object`) to be scraped by the ServiceMonitor + ## + enabled: false + ## @param metrics.serviceMonitor.perObject.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.perObject.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.perObject.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [ ] + ## @param metrics.serviceMonitor.perObject.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [ ] + ## @param metrics.serviceMonitor.perObject.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## Scrape metrics from the `/metrics/detailed` endpoint + ## ref: https://www.rabbitmq.com/docs/prometheus#detailed-endpoint + ## + detailed: + ## @param metrics.serviceMonitor.detailed.enabled Enable detailed metrics endpoint (`GET /metrics/detailed`) to be scraped by the ServiceMonitor + ## + enabled: false + ## @param metrics.serviceMonitor.detailed.family List of metric families to get + ## e.g. 
+ ## family: ["queue_coarse_metrics", "queue_consumer_count"] + ## + family: [] + ## @param metrics.serviceMonitor.detailed.vhost Filter metrics to only show for the specified vhosts + ## + vhost: [] + ## @param metrics.serviceMonitor.detailed.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.detailed.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.detailed.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [ ] + ## @param metrics.serviceMonitor.detailed.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [ ] + ## @param metrics.serviceMonitor.detailed.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + + ## @param metrics.serviceMonitor.enabled Deprecated. Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + enabled: false + ## @param metrics.serviceMonitor.interval Deprecated. Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Deprecated. Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabelings Deprecated. Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + relabelings: [ ] + ## @param metrics.serviceMonitor.metricRelabelings Deprecated. Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + metricRelabelings: [ ] + ## @param metrics.serviceMonitor.honorLabels Deprecated. Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + honorLabels: false + ## @param metrics.serviceMonitor.path Deprecated. 
Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + path: "" + ## @param metrics.serviceMonitor.params Deprecated. Please use `metrics.serviceMonitor.{default/perObject/detailed}` instead. + ## + params: { } + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "common.names.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "common.names.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "common.names.fullname" . 
}}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "common.names.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "common.names.fullname" . }}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "common.names.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] +## @section Init Container Parameters +## + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container 
volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: registry.drycc.cc + repository: drycc/base + tag: "bookworm" + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Init container's Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + seLinuxOptions: {} + runAsUser: 0 diff --git a/addons/rabbitmq/4.0/meta.yaml b/addons/rabbitmq/4.0/meta.yaml new file mode 100644 index 00000000..e118676c --- /dev/null +++ b/addons/rabbitmq/4.0/meta.yaml @@ -0,0 +1,51 @@ +name: rabbitmq-4.0 +version: "4.0" +id: 8cf49e53-ed76-4930-82eb-ea0e6535cf7d +description: "RabbitMQ is the most widely deployed open source message broker." 
+displayName: "rabbitmq-4.0" +metadata: + displayName: "rabbitmq-4.0" + provider: + name: drycc + supportURL: https://www.rabbitmq.com/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/rabbitmq +tags: rabbitmq +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "nodeSelector" + required: false + description: "nodeSelector config for values.yaml" +- name: "auth" + required: false + description: "auth config for values.yaml" +- name: "initScripts" + required: false + description: "initScripts config for values.yaml" +- name: "extraPlugins" + required: false + description: "extraPlugins config for values.yaml" +- name: "extraContainerPorts" + required: false + description: "extraContainerPorts config for values.yaml" +- name: "extraPorts" + required: false + description: "extraPorts config for values.yaml" +- name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" +- name: "service.type" + required: false + description: "service type config for values.yaml" +- name: "service.extraPorts" + required: false + description: "service.extraPorts config for values.yaml" +- name: "metrics.enabled" + required: false + description: "metrics enabled or not config for values.yaml" +- name: "extraConfiguration" + required: false + description: "extraConfiguration config for values.yaml" +archive: false diff --git a/addons/rabbitmq/4.0/plans/standard-16c32g3w/bind.yaml b/addons/rabbitmq/4.0/plans/standard-16c32g3w/bind.yaml new file mode 100644 index 00000000..d3271f8c --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-16c32g3w/bind.yaml @@ -0,0 +1,55 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: RABBITMQ_AMQP_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' + {{- end }} + + {{- if .Values.auth.tls.enabled }} + - name: RABBITMQ_AMQP_SSL_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp-tls")].port }' + {{- end }} + + {{- if .Values.auth.username }} + - name: RABBITMQ_USER + value: {{ .Values.auth.username | quote }} + {{- end }} + + {{- if (not .Values.auth.existingPasswordSecret ) }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.rabbitmq-password }' + {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) 
}} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/4.0/plans/standard-16c32g3w/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-16c32g3w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-16c32g3w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/rabbitmq/4.0/plans/standard-16c32g3w/meta.yaml b/addons/rabbitmq/4.0/plans/standard-16c32g3w/meta.yaml new file mode 100644 index 00000000..2801fa86 --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-16c32g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c32g3w" +id: 106130bc-8f94-4c26-a38d-e421cef7ebe4 +description: "RabbitMQ standard-16c32g3w plan which limit resources 16 cores 32G memory and 3 workers." +displayName: "standard-16c32g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml new file mode 100644 index 00000000..c951cdbf --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: hb-rabbitmq-standard-16c32g + +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. 
If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + limits: + cpu: 16 + memory: 32Gi + requests: + cpu: 8 + memory: 16Gi + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 64Gi diff --git a/addons/rabbitmq/4.0/plans/standard-2c4g3w/bind.yaml b/addons/rabbitmq/4.0/plans/standard-2c4g3w/bind.yaml new file mode 100644 index 00000000..d3271f8c --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-2c4g3w/bind.yaml @@ -0,0 +1,55 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: RABBITMQ_AMQP_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' + {{- end }} + + {{- if .Values.auth.tls.enabled }} + - name: RABBITMQ_AMQP_SSL_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp-tls")].port }' + {{- end }} + + {{- if .Values.auth.username }} + - name: RABBITMQ_USER + value: {{ .Values.auth.username | quote }} + {{- end }} + + {{- if (not .Values.auth.existingPasswordSecret ) }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.rabbitmq-password }' + {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) 
}} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/4.0/plans/standard-2c4g3w/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-2c4g3w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-2c4g3w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/rabbitmq/4.0/plans/standard-2c4g3w/meta.yaml b/addons/rabbitmq/4.0/plans/standard-2c4g3w/meta.yaml new file mode 100644 index 00000000..63ba3499 --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-2c4g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g3w" +id: 7d190a7c-2b0d-4d0d-8bf5-19adbdf88ff6 +description: "RabbitMQ standard-2c4g3w plan which limit resources 2 cores 4G memory and 3 workers." +displayName: "standard-2c4g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml new file mode 100644 index 00000000..c9e6dbdf --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: hb-rabbitmq-standard-2c4g + +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. 
If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 8Gi diff --git a/addons/rabbitmq/4.0/plans/standard-4c8g3w/bind.yaml b/addons/rabbitmq/4.0/plans/standard-4c8g3w/bind.yaml new file mode 100644 index 00000000..d3271f8c --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-4c8g3w/bind.yaml @@ -0,0 +1,55 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: RABBITMQ_AMQP_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' + {{- end }} + + {{- if .Values.auth.tls.enabled }} + - name: RABBITMQ_AMQP_SSL_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp-tls")].port }' + {{- end }} + + {{- if .Values.auth.username }} + - name: RABBITMQ_USER + value: {{ .Values.auth.username | quote }} + {{- end }} + + {{- if (not .Values.auth.existingPasswordSecret ) }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.rabbitmq-password }' + {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) 
}} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/4.0/plans/standard-4c8g3w/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-4c8g3w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-4c8g3w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/rabbitmq/4.0/plans/standard-4c8g3w/meta.yaml b/addons/rabbitmq/4.0/plans/standard-4c8g3w/meta.yaml new file mode 100644 index 00000000..415b459b --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-4c8g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g3w" +id: 89ac8765-ba6b-47ff-8347-904c3bac36ee +description: "RabbitMQ standard-4c8g3w plan which limit resources 4 cores 8G memory and 3 workers." +displayName: "standard-4c8g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml new file mode 100644 index 00000000..1db3c36e --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: hb-rabbitmq-standard-4c8g + +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. 
If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 2 + memory: 4Gi + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 16Gi diff --git a/addons/rabbitmq/4.0/plans/standard-8c16g3w/bind.yaml b/addons/rabbitmq/4.0/plans/standard-8c16g3w/bind.yaml new file mode 100644 index 00000000..d3271f8c --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-8c16g3w/bind.yaml @@ -0,0 +1,55 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: RABBITMQ_AMQP_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp")].port }' + {{- end }} + + {{- if .Values.auth.tls.enabled }} + - name: RABBITMQ_AMQP_SSL_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="amqp-tls")].port }' + {{- end }} + + {{- if .Values.auth.username }} + - name: RABBITMQ_USER + value: {{ .Values.auth.username | quote }} + {{- end }} + + {{- if (not .Values.auth.existingPasswordSecret ) }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.rabbitmq-password }' + {{- end }} + + {{- if (include "rabbitmq.createTlsSecret" . ) }} + {{- $secretName := printf "%s-certs" (include "common.names.fullname" .) 
}} + - name: CA_CRT + valueFrom: + secretKeyRef: + name: {{ $secretName }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} diff --git a/addons/rabbitmq/4.0/plans/standard-8c16g3w/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-8c16g3w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-8c16g3w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/rabbitmq/4.0/plans/standard-8c16g3w/meta.yaml b/addons/rabbitmq/4.0/plans/standard-8c16g3w/meta.yaml new file mode 100644 index 00000000..e0b8592d --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-8c16g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c16g3w" +id: 3fd10f9e-5659-4dee-b708-2cb5bbb19521 +description: "RabbitMQ standard-8c16g3w plan which limit resources 8 cores 16G memory and 3 workers." +displayName: "standard-8c16g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml new file mode 100644 index 00000000..e50a93f0 --- /dev/null +++ b/addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: hb-rabbitmq-standard-8c16g + +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. 
If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + limits: + cpu: 8 + memory: 16Gi + requests: + cpu: 4 + memory: 8Gi + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 32Gi From 5c2a78deb1db19d9d498c0b23c8f0f38c3a28f87 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 10 Mar 2025 09:59:01 +0800 Subject: [PATCH 41/93] chore(redis-cluste): update defaultConfigOverride config --- .../7.0/chart/redis-cluster/templates/configmap.yaml | 9 ++++++--- addons/redis-cluster/7.0/chart/redis-cluster/values.yaml | 5 +++++ addons/redis-cluster/7.0/meta.yaml | 6 ++++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml index 85b06212..de9ffc3d 100644 --- 
a/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/templates/configmap.yaml @@ -100,6 +100,10 @@ data: filename: {{ template "redis-cluster.tlsCACert" . }} {{- end }} {{- end }} +{{- if .Values.redis.defaultConfigOverride }} + redis-default.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.redis.defaultConfigOverride "context" $) | nindent 4 }} +{{- else }} redis-default.conf: |- # Redis configuration file example. # @@ -534,9 +538,7 @@ data: # You can set these explicitly by uncommenting the following line. # # save 3600 1 300 100 60 10000 - save 900 1 - save 300 10 - save 60 10000 + save 900 1 300 10 60 10000 # By default Redis will stop accepting writes if RDB snapshots are enabled # (at least one save point) and the latest background save failed. # This will make the user aware (in a hard way) that data is not persisting @@ -2379,6 +2381,7 @@ data: # to suppress # # ignore-warnings ARM64-COW-BUG +{{- end }} {{- if .Values.redis.configmap }} {{- include "common.tplvalues.render" (dict "value" .Values.redis.configmap "context" $) | nindent 4 }} {{- end }} diff --git a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml index 5e910cd6..5b6b09d1 100644 --- a/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml +++ b/addons/redis-cluster/7.0/chart/redis-cluster/values.yaml @@ -470,6 +470,11 @@ redis: ## @param redis.priorityClassName Redis® Master pod priorityClassName ## priorityClassName: "" + ## @param redis.defaultConfigOverride Optional default Redis® configuration for the nodes + ## If not set, the default Redis configuration from the chart is used + ## ref: https://redis.io/topics/config + ## + defaultConfigOverride: "" ## @param redis.configmap Additional Redis® configuration for the nodes ## ref: https://redis.io/topics/config ## diff --git a/addons/redis-cluster/7.0/meta.yaml 
b/addons/redis-cluster/7.0/meta.yaml index 5936a22d..074e22a1 100644 --- a/addons/redis-cluster/7.0/meta.yaml +++ b/addons/redis-cluster/7.0/meta.yaml @@ -21,6 +21,12 @@ allow_parameters: - name: "redis.useAOFPersistence" required: false description: "redis.useAOFPersistence config for values.yaml" +- name: "redis.defaultConfigOverride" + required: false + description: "redis.defaultConfigOverride config for values.yaml" +- name: "redis.configmap" + required: false + description: "redis.configmap config for values.yaml" - name: "redis.nodeSelector" required: false description: "redis.nodeSelector config for values.yaml" From 368d2688ee498652d35295b367941c066ead68f8 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 17 Mar 2025 11:08:24 +0800 Subject: [PATCH 42/93] remove apollo --- addons/apollo/2.3/chart/apollo/.helmignore | 21 -- addons/apollo/2.3/chart/apollo/1.yaml | 260 ----------------- addons/apollo/2.3/chart/apollo/Chart.yaml | 25 -- addons/apollo/2.3/chart/apollo/README.md | 0 .../2.3/chart/apollo/templates/NOTES.txt | 0 .../2.3/chart/apollo/templates/_helpers.tpl | 133 --------- .../apollo/templates/adminservice/NOTES.txt | 32 -- .../templates/adminservice/deployment.yaml | 88 ------ .../templates/adminservice/ingress.yaml | 63 ---- .../apollo/templates/adminservice/secret.yaml | 20 -- .../templates/adminservice/service.yaml | 22 -- .../apollo/templates/configservice/NOTES.txt | 32 -- .../templates/configservice/deployment.yaml | 88 ------ .../templates/configservice/ingress.yaml | 63 ---- .../templates/configservice/secret.yaml | 22 -- .../templates/configservice/service.yaml | 22 -- .../chart/apollo/templates/portal/NOTES.txt | 25 -- .../apollo/templates/portal/deployment.yaml | 102 ------- .../apollo/templates/portal/ingress.yaml | 64 ---- .../chart/apollo/templates/portal/secret.yaml | 36 --- .../apollo/templates/portal/service.yaml | 23 -- addons/apollo/2.3/chart/apollo/values.yaml | 273 ------------------ addons/apollo/2.3/meta.yaml | 27 -- 
.../2.3/plans/standard-1c2g2w/bind.yaml | 43 --- .../create-instance-schema.json | 12 - .../2.3/plans/standard-1c2g2w/meta.yaml | 6 - .../2.3/plans/standard-1c2g2w/values.yaml | 60 ---- 27 files changed, 1562 deletions(-) delete mode 100644 addons/apollo/2.3/chart/apollo/.helmignore delete mode 100644 addons/apollo/2.3/chart/apollo/1.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/Chart.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/README.md delete mode 100644 addons/apollo/2.3/chart/apollo/templates/NOTES.txt delete mode 100644 addons/apollo/2.3/chart/apollo/templates/_helpers.tpl delete mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt delete mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt delete mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt delete mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/templates/portal/service.yaml delete mode 100644 addons/apollo/2.3/chart/apollo/values.yaml delete mode 100644 addons/apollo/2.3/meta.yaml delete mode 100644 
addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml delete mode 100644 addons/apollo/2.3/plans/standard-1c2g2w/create-instance-schema.json delete mode 100644 addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml delete mode 100644 addons/apollo/2.3/plans/standard-1c2g2w/values.yaml diff --git a/addons/apollo/2.3/chart/apollo/.helmignore b/addons/apollo/2.3/chart/apollo/.helmignore deleted file mode 100644 index f0c13194..00000000 --- a/addons/apollo/2.3/chart/apollo/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/addons/apollo/2.3/chart/apollo/1.yaml b/addons/apollo/2.3/chart/apollo/1.yaml deleted file mode 100644 index c6edc2b8..00000000 --- a/addons/apollo/2.3/chart/apollo/1.yaml +++ /dev/null @@ -1,260 +0,0 @@ ---- -# Source: apollo/templates/adminservice/secret.yaml -kind: Secret -apiVersion: v1 -metadata: - name: release-name-apollo-adminservice -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 - spring.datasource.username = 1 - spring.datasource.password = 1 ---- -# Source: apollo/templates/portal/secret.yaml -kind: Secret -apiVersion: v1 -metadata: - name: release-name-apollo-portal -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://:3306/ApolloPortalDB?characterEncoding=utf8 - spring.datasource.username = 1 - spring.datasource.password = 1 - apollo.portal.envs = dev - apollo-env.properties: | - dev = release-name-apollo-configservice:8080 ---- -# Source: apollo/templates/configservice/secret.yaml -kind: ConfigMap -apiVersion: v1 -metadata: - name: release-name-apollo-configservice -data: - 
application-github.properties: | - spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 - spring.datasource.username = 1 - spring.datasource.password = 1 - apollo.config-service.url = http://release-name-apollo-configservice.default:8080 - apollo.admin-service.url = http://release-name-apollo-adminservice.default:8090 ---- -# Source: apollo/templates/adminservice/service.yaml -kind: Service -apiVersion: v1 -metadata: - name: release-name-apollo-adminservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - type: ClusterIP - ports: - - name: http - protocol: TCP - port: 8090 - targetPort: 8090 - selector: - app: release-name-apollo-adminservice ---- -# Source: apollo/templates/configservice/service.yaml -kind: Service -apiVersion: v1 -metadata: - name: release-name-apollo-configservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - type: ClusterIP - ports: - - name: http - protocol: TCP - port: 8080 - targetPort: 8080 - selector: - app: release-name-apollo-configservice ---- -# Source: apollo/templates/portal/service.yaml -kind: Service -apiVersion: v1 -metadata: - name: release-name-apollo-portal - labels: - app.kubernetes.io/version: "2.3.0" -spec: - type: ClusterIP - ports: - - name: http - protocol: TCP - port: 8070 - targetPort: 8070 - selector: - app: release-name-apollo-portal - sessionAffinity: ClientIP ---- -# Source: apollo/templates/adminservice/deployment.yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: release-name-apollo-adminservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - replicas: 2 - selector: - matchLabels: - app: release-name-apollo-adminservice - template: - metadata: - labels: - app: release-name-apollo-adminservice - spec: - volumes: - - name: volume-configmap-release-name-apollo-adminservice - configMap: - name: release-name-apollo-adminservice - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: 
apollo-adminservice - image: "drycc-addons/apollo-adminservice:2.3" - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 8090 - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: "github,kubernetes" - volumeMounts: - - name: volume-configmap-release-name-apollo-adminservice - mountPath: /apollo-adminservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: 8090 - initialDelaySeconds: 100 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /health - port: 8090 - initialDelaySeconds: 30 - periodSeconds: 5 - resources: - {} ---- -# Source: apollo/templates/configservice/deployment.yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: release-name-apollo-configservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - replicas: 2 - selector: - matchLabels: - app: release-name-apollo-configservice - template: - metadata: - labels: - app: release-name-apollo-configservice - spec: - volumes: - - name: volume-configmap-release-name-apollo-configservice - configMap: - name: release-name-apollo-configservice - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: apollo-configservice - image: "drycc-addons/apollo-configservice:2.3" - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 8080 - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: "github,kubernetes" - volumeMounts: - - name: volume-configmap-release-name-apollo-configservice - mountPath: /apollo-configservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 100 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /health - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 5 - resources: - {} ---- -# Source: apollo/templates/portal/deployment.yaml -kind: Deployment -apiVersion: apps/v1 -metadata: 
- name: release-name-apollo-portal - labels: - app.kubernetes.io/version: "2.3.0" -spec: - replicas: 1 - selector: - matchLabels: - app: release-name-apollo-portal - template: - metadata: - labels: - app: release-name-apollo-portal - spec: - volumes: - - name: secret-release-name-apollo-portal - Secret: - name: release-name-apollo-portal - items: - - key: application-github.properties - path: application-github.properties - - key: apollo-env.properties - path: apollo-env.properties - defaultMode: 420 - containers: - - name: apollo-portal - image: "drycc-addons/apollo-portal:2.3" - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 8070 - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: "github,auth" - volumeMounts: - - name: secret-release-name-apollo-portal - mountPath: /apollo-portal/config/application-github.properties - subPath: application-github.properties - - name: secret-release-name-apollo-portal - mountPath: /apollo-portal/config/apollo-env.properties - subPath: apollo-env.properties - livenessProbe: - tcpSocket: - port: 8070 - initialDelaySeconds: 100 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /health - port: 8070 - initialDelaySeconds: 30 - periodSeconds: 5 - resources: - {} ---- -# Source: apollo/templates/portal/ingress.yaml -# diff --git a/addons/apollo/2.3/chart/apollo/Chart.yaml b/addons/apollo/2.3/chart/apollo/Chart.yaml deleted file mode 100644 index 07bdb346..00000000 --- a/addons/apollo/2.3/chart/apollo/Chart.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright Drycc Community. 
-# SPDX-License-Identifier: APACHE-2.0 - -annotations: - category: config - licenses: Apache-2.0 -apiVersion: v2 -appVersion: "2.3.0" -dependencies: -- name: common - repository: oci://registry.drycc.cc/charts - version: ~1.1.3 -description: A Helm chart for Apollo Config Service and Apollo Admin Service -home: https://github.com/apolloconfig/apollo -icon: https://raw.githubusercontent.com/apolloconfig/apollo/master/apollo-portal/src/main/resources/static/img/logo-simple.png -keywords: -- apollo -- apolloconfig -maintainers: -- name: Drycc Community. - url: https://github.com/drycc-addons/addons -name: apollo -sources: -- https://github.com/drycc-addons/addons -version: 0.1.0 diff --git a/addons/apollo/2.3/chart/apollo/README.md b/addons/apollo/2.3/chart/apollo/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/addons/apollo/2.3/chart/apollo/templates/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/NOTES.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/addons/apollo/2.3/chart/apollo/templates/_helpers.tpl b/addons/apollo/2.3/chart/apollo/templates/_helpers.tpl deleted file mode 100644 index 6baef133..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/_helpers.tpl +++ /dev/null @@ -1,133 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - - -{{/* -Full name for portal service -*/}} -{{- define "apollo.portal.fullName" -}} -{{- if .Values.portal.fullNameOverride -}} -{{- .Values.portal.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- if contains .Values.portal.name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name .Values.portal.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - - -{{/* -Common labels -*/}} -{{- define "apollo.labels" -}} -{{- if .Chart.AppVersion -}} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -{{- end -}} - -{{/* -Service name for portal 
-*/}} -{{- define "apollo.portal.serviceName" -}} -{{- if .Values.portal.service.fullNameOverride -}} -{{- .Values.portal.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{ include "apollo.portal.fullName" .}} -{{- end -}} -{{- end -}} - - -{{/* vim: set filetype=mustache: */}} - -{{/* -Service name for configdb -*/}} -{{- define "apollo.configdb.serviceName" -}} -{{- .Values.apolloService.configdb.host -}} -{{- end -}} - -{{/* -Service port for configdb -*/}} -{{- define "apollo.configdb.servicePort" -}} -{{- if .Values.apolloService.configdb.service.enabled -}} -{{- .Values.apolloService.configdb.service.port -}} -{{- else -}} -{{- .Values.apolloService.configdb.port -}} -{{- end -}} -{{- end -}} - -{{/* -Full name for config service -*/}} -{{- define "apollo.configService.fullName" -}} -{{- if .Values.apolloService.configService.fullNameOverride -}} -{{- .Values.apolloService.configService.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- if contains .Values.apolloService.configService.name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name .Values.apolloService.configService.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Service name for config service -*/}} -{{- define "apollo.configService.serviceName" -}} -{{- if .Values.apolloService.configService.service.fullNameOverride -}} -{{- .Values.apolloService.configService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{ include "apollo.configService.fullName" .}} -{{- end -}} -{{- end -}} - -{{/* -Config service url to be accessed by apollo-client -*/}} -{{- define "apollo.configService.serviceUrl" -}} -{{- if .Values.apolloService.configService.config.configServiceUrlOverride -}} -{{ .Values.apolloService.configService.config.configServiceUrlOverride }} -{{- else -}} -http://{{ include "apollo.configService.serviceName" .}}.{{ 
.Release.Namespace }}:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} -{{- end -}} -{{- end -}} - -{{/* -Full name for admin service -*/}} -{{- define "apollo.adminService.fullName" -}} -{{- if .Values.apolloService.adminService.fullNameOverride -}} -{{- .Values.apolloService.adminService.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- if contains .Values.apolloService.adminService.name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name .Values.apolloService.adminService.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Service name for admin service -*/}} -{{- define "apollo.adminService.serviceName" -}} -{{- if .Values.apolloService.adminService.service.fullNameOverride -}} -{{- .Values.apolloService.adminService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{ include "apollo.adminService.fullName" .}} -{{- end -}} -{{- end -}} - -{{/* -Admin service url to be accessed by apollo-portal -*/}} -{{- define "apollo.adminService.serviceUrl" -}} -{{- if .Values.apolloService.configService.config.adminServiceUrlOverride -}} -{{ .Values.apolloService.configService.config.adminServiceUrlOverride -}} -{{- else -}} -http://{{ include "apollo.adminService.serviceName" .}}.{{ .Release.Namespace }}:{{ .Values.apolloService.adminService.service.port }}{{ .Values.apolloService.adminService.config.contextPath }} -{{- end -}} -{{- end -}} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt deleted file mode 100644 index 78ce9341..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/adminservice/NOTES.txt +++ /dev/null @@ -1,32 +0,0 @@ -Meta service url for current release: -{{- if contains "NodePort" .Values.apolloService.configService.service.type }} - export 
NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} - echo {{ include "apollo.configService.serviceUrl" .}} - -For local test use: - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") - echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 -{{- end }} - -{{- if .Values.apolloService.configService.ingress.enabled }} - -Ingress: -{{- range $host := .Values.apolloService.configService.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . 
}} - {{- end }} -{{- end }} -{{- end }} - -Urls registered to meta service: -Config service: {{ include "apollo.configService.serviceUrl" .}} -Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml deleted file mode 100644 index d115a3c3..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/adminservice/deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $adminServiceFullName := include "apollo.adminService.fullName" . }} -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ $adminServiceFullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.apolloService.adminService.replicaCount }} - selector: - matchLabels: - app: {{ $adminServiceFullName }} - {{- with .Values.apolloService.adminService.strategy }} - strategy: - {{- toYaml . | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ $adminServiceFullName }} - {{- with .Values.apolloService.adminService.annotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.apolloService.adminService.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: volume-configmap-{{ $adminServiceFullName }} - configMap: - name: {{ $adminServiceFullName }} - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: {{ .Values.apolloService.adminService.name }} - image: "{{ .Values.apolloService.adminService.image.repository }}:{{ .Values.apolloService.adminService.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.apolloService.adminService.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.apolloService.adminService.containerPort }} - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: {{ .Values.apolloService.adminService.config.profiles | quote }} - {{- range $key, $value := .Values.apolloService.adminService.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - volumeMounts: - - name: volume-configmap-{{ $adminServiceFullName }} - mountPath: /apollo-adminservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: {{ .Values.apolloService.adminService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.adminService.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.adminService.liveness.periodSeconds }} - readinessProbe: - httpGet: - path: {{ .Values.apolloService.adminService.config.contextPath }}/health - port: {{ .Values.apolloService.adminService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.adminService.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.adminService.readiness.periodSeconds }} - resources: - {{- toYaml .Values.apolloService.adminService.resources | nindent 12 }} - {{- with .Values.apolloService.adminService.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.apolloService.adminService.affinity }} - affinity: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- with .Values.apolloService.adminService.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml deleted file mode 100644 index 1f5efcdf..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/adminservice/ingress.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- if .Values.apolloService.adminService.ingress.enabled -}} -{{- $fullName := include "apollo.adminService.fullName" . -}} -{{- $svcPort := .Values.apolloService.adminService.service.port -}} -{{- $apiVersion := "extensions/v1beta1" -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1" }} -{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1beta1" }} -{{- end }} -apiVersion: {{ $apiVersion }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} - {{- with .Values.apolloService.adminService.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.apolloService.adminService.ingress.ingressClassName }} - ingressClassName: {{ .Values.apolloService.adminService.ingress.ingressClassName }} -{{- end }} -{{- if .Values.apolloService.adminService.ingress.tls }} - tls: - {{- range .Values.apolloService.adminService.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.apolloService.adminService.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . 
}} - {{- if eq $apiVersion "networking.k8s.io/v1" }} - pathType: Prefix - {{- end }} - backend: - {{- if eq $apiVersion "networking.k8s.io/v1" }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml deleted file mode 100644 index d7f35e89..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/adminservice/secret.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $adminServiceFullName := include "apollo.adminService.fullName" . }} -kind: Secret -apiVersion: v1 -metadata: - name: {{ $adminServiceFullName }} -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }} - spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }} - spring.datasource.password = {{ required "configdb.password is required!" 
.Values.apolloService.configdb.password }} - {{- if .Values.apolloService.adminService.config.contextPath }} - server.servlet.context-path = {{ .Values.apolloService.adminService.config.contextPath }} - {{- end }} -{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml b/addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml deleted file mode 100644 index ff44f6cc..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/adminservice/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: {{ include "apollo.adminService.serviceName" . }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - type: {{ .Values.apolloService.adminService.service.type }} - ports: - - name: http - protocol: TCP - port: {{ .Values.apolloService.adminService.service.port }} - targetPort: {{ .Values.apolloService.adminService.service.targetPort }} - selector: - app: {{ include "apollo.adminService.fullName" . }} -{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt deleted file mode 100644 index 78ce9341..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/configservice/NOTES.txt +++ /dev/null @@ -1,32 +0,0 @@ -Meta service url for current release: -{{- if contains "NodePort" .Values.apolloService.configService.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . 
}}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} - echo {{ include "apollo.configService.serviceUrl" .}} - -For local test use: - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") - echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 -{{- end }} - -{{- if .Values.apolloService.configService.ingress.enabled }} - -Ingress: -{{- range $host := .Values.apolloService.configService.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . 
}} - {{- end }} -{{- end }} -{{- end }} - -Urls registered to meta service: -Config service: {{ include "apollo.configService.serviceUrl" .}} -Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml deleted file mode 100644 index 947e8eb9..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/configservice/deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $configServiceFullName := include "apollo.configService.fullName" . }} -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ $configServiceFullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.apolloService.configService.replicaCount }} - selector: - matchLabels: - app: {{ $configServiceFullName }} - {{- with .Values.apolloService.configService.strategy }} - strategy: - {{- toYaml . | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ $configServiceFullName }} - {{- with .Values.apolloService.configService.annotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.apolloService.configService.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: volume-configmap-{{ $configServiceFullName }} - configMap: - name: {{ $configServiceFullName }} - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: {{ .Values.apolloService.configService.name }} - image: "{{ .Values.apolloService.configService.image.repository }}:{{ .Values.apolloService.configService.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.apolloService.configService.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.apolloService.configService.containerPort }} - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: {{ .Values.apolloService.configService.config.profiles | quote }} - {{- range $key, $value := .Values.apolloService.configService.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - volumeMounts: - - name: volume-configmap-{{ $configServiceFullName }} - mountPath: /apollo-configservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: {{ .Values.apolloService.configService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.configService.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.configService.liveness.periodSeconds }} - readinessProbe: - httpGet: - path: {{ .Values.apolloService.configService.config.contextPath }}/health - port: {{ .Values.apolloService.configService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.configService.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.configService.readiness.periodSeconds }} - resources: - {{- toYaml .Values.apolloService.configService.resources | nindent 12 }} - {{- with .Values.apolloService.configService.nodeSelector }} - nodeSelector: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- with .Values.apolloService.configService.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.apolloService.configService.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml deleted file mode 100644 index 36fc5421..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/configservice/ingress.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- if .Values.apolloService.configService.ingress.enabled -}} -{{- $fullName := include "apollo.configService.fullName" . -}} -{{- $svcPort := .Values.apolloService.configService.service.port -}} -{{- $apiVersion := "extensions/v1beta1" -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1" }} -{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1beta1" }} -{{- end }} -apiVersion: {{ $apiVersion }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} - {{- with .Values.apolloService.configService.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.apolloService.configService.ingress.ingressClassName }} - ingressClassName: {{ .Values.apolloService.configService.ingress.ingressClassName }} -{{- end }} -{{- if .Values.apolloService.configService.ingress.tls }} - tls: - {{- range .Values.apolloService.configService.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . 
| quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.apolloService.configService.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - {{- if eq $apiVersion "networking.k8s.io/v1" }} - pathType: Prefix - {{- end }} - backend: - {{- if eq $apiVersion "networking.k8s.io/v1" }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml deleted file mode 100644 index 5224b51d..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/configservice/secret.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $configServiceFullName := include "apollo.configService.fullName" . }} -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{ $configServiceFullName }} -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }} - spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }} - spring.datasource.password = {{ required "configdb.password is required!" 
.Values.apolloService.configdb.password }} - apollo.config-service.url = {{ include "apollo.configService.serviceUrl" .}} - apollo.admin-service.url = {{ include "apollo.adminService.serviceUrl" .}} - {{- if .Values.apolloService.configService.config.contextPath }} - server.servlet.context-path = {{ .Values.apolloService.configService.config.contextPath }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml b/addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml deleted file mode 100644 index 9bcbb5e1..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/configservice/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: {{ include "apollo.configService.serviceName" . }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - type: {{ .Values.apolloService.configService.service.type }} - ports: - - name: http - protocol: TCP - port: {{ .Values.apolloService.configService.service.port }} - targetPort: {{ .Values.apolloService.configService.service.targetPort }} - selector: - app: {{ include "apollo.configService.fullName" . }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt b/addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt deleted file mode 100644 index d49cfe0e..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/portal/NOTES.txt +++ /dev/null @@ -1,25 +0,0 @@ -Portal url for current release: -{{- if contains "NodePort" .Values.portal.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.portal.fullName" . 
}}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.portal.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.portal.fullName" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.portal.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.portal.service.port }} -{{- else if contains "ClusterIP" .Values.portal.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.portal.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8070 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8070:8070 -{{- end }} - -{{- if .Values.portal.ingress.enabled }} - -Ingress: -{{- range $host := .Values.portal.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.portal.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml deleted file mode 100644 index 7c86b3e7..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/portal/deployment.yaml +++ /dev/null @@ -1,102 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -{{- $portalFullName := include "apollo.portal.fullName" . }} -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ $portalFullName }} - labels: - {{- include "apollo.labels" . 
| nindent 4 }} -spec: - replicas: {{ .Values.portal.replicaCount }} - selector: - matchLabels: - app: {{ $portalFullName }} - {{- with .Values.portal.strategy }} - strategy: - {{- toYaml . | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ $portalFullName }} - {{- with .Values.portal.annotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.portal.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - volumes: - - name: secret-{{ $portalFullName }} - Secret: - name: {{ $portalFullName }} - items: - - key: application-github.properties - path: application-github.properties - - key: apollo-env.properties - path: apollo-env.properties - {{- range $fileName, $content := .Values.portal.config.files }} - - key: {{ $fileName }} - path: {{ $fileName }} - {{- end }} - defaultMode: 420 - containers: - - name: {{ .Values.portal.name }} - image: "{{ .Values.portal.image.repository }}:{{ .Values.portal.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.portal.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.portal.containerPort }} - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: {{ .Values.portal.config.profiles | quote }} - {{- range $key, $value := .Values.portal.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - volumeMounts: - - name: secret-{{ $portalFullName }} - mountPath: /apollo-portal/config/application-github.properties - subPath: application-github.properties - - name: secret-{{ $portalFullName }} - mountPath: /apollo-portal/config/apollo-env.properties - subPath: apollo-env.properties - {{- range $fileName, $content := .Values.portal.config.files }} - - name: secret-{{ $portalFullName }} - mountPath: /apollo-portal/config/{{ $fileName }} - subPath: {{ $fileName }} - {{- end }} - livenessProbe: - tcpSocket: - port: {{ .Values.portal.containerPort }} - initialDelaySeconds: {{ 
.Values.portal.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.portal.liveness.periodSeconds }} - readinessProbe: - httpGet: - path: {{ .Values.portal.config.contextPath }}/health - port: {{ .Values.portal.containerPort }} - initialDelaySeconds: {{ .Values.portal.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.portal.readiness.periodSeconds }} - resources: - {{- toYaml .Values.portal.resources | nindent 12 }} - {{- with .Values.portal.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.portal.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.portal.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml deleted file mode 100644 index b01dc05b..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/portal/ingress.yaml +++ /dev/null @@ -1,64 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -# -{{- if .Values.portal.ingress.enabled -}} -{{- $fullName := include "apollo.portal.fullName" . -}} -{{- $svcPort := .Values.portal.service.port -}} -{{- $apiVersion := "extensions/v1beta1" -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1" }} -{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1beta1" }} -{{- end }} -apiVersion: {{ $apiVersion }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} - {{- with .Values.portal.ingress.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: -{{- if .Values.portal.ingress.ingressClassName }} - ingressClassName: {{ .Values.portal.ingress.ingressClassName }} -{{- end }} -{{- if .Values.portal.ingress.tls }} - tls: - {{- range .Values.portal.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.portal.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - {{- if eq $apiVersion "networking.k8s.io/v1" }} - pathType: Prefix - {{- end }} - backend: - {{- if eq $apiVersion "networking.k8s.io/v1" }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml deleted file mode 100644 index 32e8cfbb..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/portal/secret.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -{{- $portalFullName := include "apollo.portal.fullName" . }} -kind: Secret -apiVersion: v1 -metadata: - name: {{ $portalFullName }} -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://{{ .Values.portal.portaldb.host }}:{{ .Values.portal.portaldb.port }}/{{ .Values.portal.portaldb.dbName }}{{ if .Values.portal.portaldb.connectionStringProperties }}?{{ .Values.portal.portaldb.connectionStringProperties }}{{ end }} - spring.datasource.username = {{ required "portaldb.userName is required!" .Values.portal.portaldb.userName }} - spring.datasource.password = {{ required "portaldb.password is required!" 
.Values.portal.portaldb.password }} - {{- if .Values.portal.config.envs }} - apollo.portal.envs = {{ .Values.portal.config.envs }} - {{- end }} - {{- if .Values.portal.config.contextPath }} - server.servlet.context-path = {{ .Values.portal.config.contextPath }} - {{- end }} - apollo-env.properties: | - {{- if .Values.apolloService.enabled }} - {{ .Values.apolloService.meta }} = {{ include "apollo.configService.serviceName" . }}:{{ .Values.apolloService.configService.service.port }} - {{- end }} - {{- if .Values.portal.config.metaServers }} - {{- range $env, $address := .Values.portal.config.metaServers }} - {{ $env }}.meta = {{ $address }} - {{- end }} - {{- end }} -{{- range $fileName, $content := .Values.portal.config.files }} -{{ $fileName | indent 2 }}: | -{{ $content | indent 4 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo/2.3/chart/apollo/templates/portal/service.yaml b/addons/apollo/2.3/chart/apollo/templates/portal/service.yaml deleted file mode 100644 index da8237d5..00000000 --- a/addons/apollo/2.3/chart/apollo/templates/portal/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: {{ include "apollo.portal.serviceName" . }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - type: {{ .Values.portal.service.type }} - ports: - - name: http - protocol: TCP - port: {{ .Values.portal.service.port }} - targetPort: {{ .Values.portal.service.targetPort }} - selector: - app: {{ include "apollo.portal.fullName" . }} - sessionAffinity: {{ .Values.portal.service.sessionAffinity }} -{{- end }} diff --git a/addons/apollo/2.3/chart/apollo/values.yaml b/addons/apollo/2.3/chart/apollo/values.yaml deleted file mode 100644 index 7a00ff0e..00000000 --- a/addons/apollo/2.3/chart/apollo/values.yaml +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright Drycc Community. 
-# SPDX-License-Identifier: APACHE-2.0 - -## @section Global parameters -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass - -## @param global.imageRegistry Global Docker image registry -## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) -## -global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - storageClass: "" - -## @section Common parameters - -## @param kubeVersion Override Kubernetes version -## -kubeVersion: "" -## @param nameOverride String to partially override common.names.fullname -## -nameOverride: "" -## @param fullnameOverride String to fully override common.names.fullname -## -fullnameOverride: "" -## @param commonLabels Labels to add to all deployed objects -## -commonLabels: {} -## @param commonAnnotations Annotations to add to all deployed objects -## -commonAnnotations: {} -## @param clusterDomain Kubernetes cluster domain name -## -clusterDomain: cluster.local -## @param extraDeploy Array of extra objects to deploy with the release -## -extraDeploy: [] -## @param namespaceOverride String to fully override common.names.namespace -## -namespaceOverride: "" - -## Enable diagnostic mode in the deployment -## -diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment - ## - args: - - infinity - -portal: - enabled: true - name: apollo-portal - fullNameOverride: "" - replicaCount: 1 
- containerPort: 8070 - image: - registry: registry.drycc.cc - repository: drycc-addons/apollo-portal - tag: "2.3" - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - service: - fullNameOverride: "" - port: 8070 - targetPort: 8070 - type: ClusterIP - sessionAffinity: ClientIP - ingress: - ingressClassName: null - enabled: false - annotations: {} - hosts: - - host: "" - paths: [] - tls: [] - liveness: - initialDelaySeconds: 100 - periodSeconds: 10 - readiness: - initialDelaySeconds: 30 - periodSeconds: 5 - # environment variables passed to the container, e.g. JAVA_OPTS - env: {} - strategy: {} - resources: {} - nodeSelector: {} - tolerations: [] - affinity: {} - annotations: {} - - config: - # spring profiles to activate - profiles: "github,auth" - # specify the env names, e.g. dev,pro - envs: "dev" - # specify the meta servers, e.g. - # dev: http://apollo-configservice-dev:8080 - # pro: http://apollo-configservice-pro:8080 - metaServers: "" - # specify the context path, e.g. /apollo - contextPath: "" - # extra config files for apollo-portal, e.g. 
application-ldap.yml - files: {} - - portaldb: - # apolloportaldb host - host: "" - port: 3306 - dbName: ApolloPortalDB - # apolloportaldb user name - userName: "1" - # apolloportaldb password - password: "1" - connectionStringProperties: characterEncoding=utf8 - - -## @section Apollo parameters - -apolloService: - enabled: true - meta: "dev" - configdb: - # apolloconfigdb host - host: "" - port: 3306 - dbName: ApolloConfigDB - # apolloconfigdb user name - userName: "1" - # apolloconfigdb password - password: "1" - connectionStringProperties: characterEncoding=utf8 - - configService: - name: apollo-configservice - fullNameOverride: "" - replicaCount: 2 - containerPort: 8080 - image: - repository: drycc-addons/apollo-configservice - tag: "2.3" - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - service: - fullNameOverride: "" - port: 8080 - targetPort: 8080 - type: ClusterIP - ingress: - ingressClassName: null - enabled: false - annotations: { } - hosts: - - host: "" - paths: [ ] - tls: [ ] - liveness: - initialDelaySeconds: 100 - periodSeconds: 10 - readiness: - initialDelaySeconds: 30 - periodSeconds: 5 - config: - # spring profiles to activate - profiles: "github,kubernetes" - # override apollo.config-service.url: config service url to be accessed by apollo-client - configServiceUrlOverride: "" - # override apollo.admin-service.url: admin service url to be accessed by apollo-portal - adminServiceUrlOverride: "" - # specify the context path, e.g. 
/apollo - contextPath: "" - # environment variables passed to the container, e.g. JAVA_OPTS - env: {} - strategy: {} - resources: {} - nodeSelector: {} - tolerations: [] - affinity: {} - annotations: {} - - adminService: - name: apollo-adminservice - fullNameOverride: "" - replicaCount: 2 - containerPort: 8090 - image: - registry: registry.drycc.cc - repository: drycc-addons/apollo-adminservice - tag: "2.3" - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - service: - fullNameOverride: "" - port: 8090 - targetPort: 8090 - type: ClusterIP - ingress: - ingressClassName: null - enabled: false - annotations: { } - hosts: - - host: "" - paths: [ ] - tls: [ ] - liveness: - initialDelaySeconds: 100 - periodSeconds: 10 - readiness: - initialDelaySeconds: 30 - periodSeconds: 5 - config: - # spring profiles to activate - profiles: "github,kubernetes" - # specify the context path, e.g. /apollo - contextPath: "" - # environment variables passed to the container, e.g. JAVA_OPTS - env: {} - strategy: {} - resources: {} - nodeSelector: {} - tolerations: [] - affinity: {} - annotations: {} - diff --git a/addons/apollo/2.3/meta.yaml b/addons/apollo/2.3/meta.yaml deleted file mode 100644 index edb96d83..00000000 --- a/addons/apollo/2.3/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -name: apollo -version: 2 -id: 06653a76-126d-4c9d-a929-e4841185ab68 -description: "apollo." 
-displayName: "apollo" -metadata: - displayName: "apollo" - provider: - name: drycc - supportURL: https://www.apolloconfig.com/ - documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/apollo-adminservice -tags: apollo -bindable: true -instances_retrievable: true -bindings_retrievable: true -plan_updateable: true -allow_parameters: -- name: "portal.enabled" - required: false - description: "portal.enabled config for values.yaml" -- name: "portal.config" - required: false - description: "portal.config config for values.yaml" -- name: "portal.portaldb" - required: false - description: "portal.config config for values.yaml" -archive: false diff --git a/addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml b/addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml deleted file mode 100644 index e37ddd93..00000000 --- a/addons/apollo/2.3/plans/standard-1c2g2w/bind.yaml +++ /dev/null @@ -1,43 +0,0 @@ -credential: - {{ if (eq .Values.service.type "LoadBalancer") }} - - name: EXTERNAL_WEB_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - - - name: WEB_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.clusterIP }' - - - name: WEB_PORT - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.name=="http")].port }' - - - name: USER - value: {{ .Values.auth.username }} - - {{- if (not .Values.auth.existingSecret) }} - - name: AIRFLOW_PASSWORD - valueFrom: - secretKeyRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .data.airflow-password }' - - - name: AIRFLOW_FERNET_KEY - valueFrom: - secretKeyRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .data.airflow-fernet-key }' - - - name: AIRFLOW_SECRET_KEY - valueFrom: - secretKeyRef: - name: {{ include "common.names.fullname" . 
}} - jsonpath: '{ .data.airflow-secret-key }' - {{- end }} diff --git a/addons/apollo/2.3/plans/standard-1c2g2w/create-instance-schema.json b/addons/apollo/2.3/plans/standard-1c2g2w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/apollo/2.3/plans/standard-1c2g2w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml b/addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml deleted file mode 100644 index a10be35d..00000000 --- a/addons/apollo/2.3/plans/standard-1c2g2w/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-1c2g2w" -id: 75f949c8-8366-4805-aa8b-553de0ec6c24 -description: "airflow standard-1c2g2w plan which limit resources 2 workers per worker 1 core memory size 2Gi." 
-displayName: "standard-1c2g2w" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/apollo/2.3/plans/standard-1c2g2w/values.yaml b/addons/apollo/2.3/plans/standard-1c2g2w/values.yaml deleted file mode 100644 index caeaba82..00000000 --- a/addons/apollo/2.3/plans/standard-1c2g2w/values.yaml +++ /dev/null @@ -1,60 +0,0 @@ -## @param fullnameOverride String to fully override common.names.fullname template -## -fullnameOverride: hb-airflow-standard-1c2g2w - -## @section Airflow web parameters - -web: - ## @param web.replicaCount Number of Airflow web replicas - ## - replicaCount: 1 - ## Airflow web resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param web.resources.limits The resources limits for the Airflow web containers - ## @param web.resources.requests The requested resources for the Airflow web containers - ## - resources: - limits: - cpu: 1000m - memory: 2048Mi - requests: - cpu: 500m - memory: 1024Mi - -## @section Airflow scheduler parameters - -scheduler: - ## @param scheduler.replicaCount Number of scheduler replicas - ## - replicaCount: 1 - ## Airflow scheduler resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param scheduler.resources.limits The resources limits for the Airflow scheduler containers - ## @param scheduler.resources.requests The requested resources for the Airflow scheduler containers - ## - resources: - limits: - cpu: 1000m - memory: 2048Mi - requests: - cpu: 500m - memory: 1024Mi - -## @section Airflow worker parameters - -worker: - ## @param worker.replicaCount Number of Airflow worker replicas - ## - replicaCount: 2 - ## Airflow worker resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param worker.resources.limits The resources limits for the Airflow worker containers - ## @param worker.resources.requests The requested resources for the Airflow worker containers - ## 
- resources: - limits: - cpu: 1000m - memory: 2048Mi - requests: - cpu: 500m - memory: 1024Mi From 0fe369f4694ec0045a68db9837a5597380689043 Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 17 Mar 2025 11:15:52 +0800 Subject: [PATCH 43/93] chore(addons): add nessie add postgres-cluster --- .gitignore | 6 +- addons/index.yaml | 7 +- addons/nessie/0.103/chart/nessie/Chart.yaml | 21 + addons/nessie/0.103/chart/nessie/README.md | 351 +++++++ .../chart/nessie/simple-demo-values.yaml | 44 + .../0.103/chart/nessie/templates/NOTES.txt | 38 + .../0.103/chart/nessie/templates/_helpers.tpl | 519 +++++++++ .../chart/nessie/templates/configmap.yaml | 225 ++++ .../chart/nessie/templates/deployment.yaml | 199 ++++ .../0.103/chart/nessie/templates/hpa.yaml | 65 ++ .../0.103/chart/nessie/templates/ingress.yaml | 75 ++ .../0.103/chart/nessie/templates/secrets.yaml | 19 + .../0.103/chart/nessie/templates/service.yaml | 118 +++ .../nessie/templates/serviceaccount.yaml | 31 + .../nessie/templates/servicemonitor.yaml | 47 + .../0.103/chart/nessie/templates/storage.yaml | 63 ++ addons/nessie/0.103/chart/nessie/values.yaml | 987 ++++++++++++++++++ addons/nessie/0.103/meta.yaml | 30 + .../0.103/plans/standard-4c4g/bind.yaml | 19 + .../0.103/plans/standard-4c4g/meta.yaml | 6 + .../0.103/plans/standard-4c4g/values.yaml | 11 + .../0.103/plans/standard-8c8g/bind.yaml | 19 + .../0.103/plans/standard-8c8g/meta.yaml | 6 + .../0.103/plans/standard-8c8g/values.yaml | 11 + .../15/chart/postgresql-cluster/Chart.yaml | 4 +- .../16/chart/postgresql-cluster-16/Chart.yaml | 28 + .../16/chart/postgresql-cluster-16/README.md | 166 +++ .../postgresql-cluster-16/templates/NOTES.txt | 25 + .../templates/_helpers.tpl | 219 ++++ .../templates/cm-backup.yaml | 16 + .../templates/cm-logicalbackup .yaml | 19 + .../templates/cm-patroni.yaml | 20 + .../templates/cm-postgresql.yaml | 18 + .../templates/cronjob.yaml | 43 + .../templates/logicalbackup-cronjob.yaml | 69 ++ .../templates/networkpolicy.yaml | 54 + 
.../postgresql-cluster-16/templates/role.yaml | 49 + .../templates/rolebinding.yaml | 19 + .../postgresql-cluster-16/templates/sec.yaml | 18 + .../templates/serviceaccount.yaml | 12 + .../templates/statefulset.yaml | 273 +++++ .../templates/svc-config.yaml | 11 + .../templates/svc-master.yaml | 24 + .../templates/svc-metrics.yaml | 32 + .../templates/svc-relp.yaml | 26 + .../postgresql-cluster-16/templates/svc.yaml | 18 + .../chart/postgresql-cluster-16/values.yaml | 441 ++++++++ addons/postgresql-cluster/16/meta.yaml | 30 + .../16/plans/standard-16c64g400/bind.yaml | 41 + .../create-instance-schema.json | 12 + .../16/plans/standard-16c64g400/meta.yaml | 6 + .../16/plans/standard-16c64g400/values.yaml | 81 ++ .../16/plans/standard-2c4g20/bind.yaml | 41 + .../create-instance-schema.json | 12 + .../16/plans/standard-2c4g20/meta.yaml | 6 + .../16/plans/standard-2c4g20/values.yaml | 81 ++ .../16/plans/standard-2c8g50/bind.yaml | 41 + .../create-instance-schema.json | 12 + .../16/plans/standard-2c8g50/meta.yaml | 6 + .../16/plans/standard-2c8g50/values.yaml | 83 ++ .../16/plans/standard-32c128g800/bind.yaml | 41 + .../create-instance-schema.json | 12 + .../16/plans/standard-32c128g800/meta.yaml | 6 + .../16/plans/standard-32c128g800/values.yaml | 82 ++ .../16/plans/standard-32c64g4000/bind.yaml | 41 + .../create-instance-schema.json | 12 + .../16/plans/standard-32c64g4000/meta.yaml | 6 + .../16/plans/standard-32c64g4000/values.yaml | 82 ++ .../16/plans/standard-4c16g100/bind.yaml | 41 + .../create-instance-schema.json | 12 + .../16/plans/standard-4c16g100/meta.yaml | 6 + .../16/plans/standard-4c16g100/values.yaml | 83 ++ .../16/plans/standard-8c32g200/bind.yaml | 41 + .../create-instance-schema.json | 12 + .../16/plans/standard-8c32g200/meta.yaml | 6 + .../16/plans/standard-8c32g200/values.yaml | 82 ++ 76 files changed, 5534 insertions(+), 4 deletions(-) create mode 100644 addons/nessie/0.103/chart/nessie/Chart.yaml create mode 100644 
addons/nessie/0.103/chart/nessie/README.md create mode 100644 addons/nessie/0.103/chart/nessie/simple-demo-values.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/NOTES.txt create mode 100644 addons/nessie/0.103/chart/nessie/templates/_helpers.tpl create mode 100644 addons/nessie/0.103/chart/nessie/templates/configmap.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/deployment.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/hpa.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/ingress.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/secrets.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/service.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/serviceaccount.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/servicemonitor.yaml create mode 100644 addons/nessie/0.103/chart/nessie/templates/storage.yaml create mode 100644 addons/nessie/0.103/chart/nessie/values.yaml create mode 100644 addons/nessie/0.103/meta.yaml create mode 100644 addons/nessie/0.103/plans/standard-4c4g/bind.yaml create mode 100644 addons/nessie/0.103/plans/standard-4c4g/meta.yaml create mode 100644 addons/nessie/0.103/plans/standard-4c4g/values.yaml create mode 100644 addons/nessie/0.103/plans/standard-8c8g/bind.yaml create mode 100644 addons/nessie/0.103/plans/standard-8c8g/meta.yaml create mode 100644 addons/nessie/0.103/plans/standard-8c8g/values.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/Chart.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/README.md create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/NOTES.txt create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/_helpers.tpl create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-backup.yaml create mode 100644 
addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-logicalbackup .yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-patroni.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-postgresql.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cronjob.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/logicalbackup-cronjob.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/networkpolicy.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/role.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/rolebinding.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/sec.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/serviceaccount.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-config.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-master.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-metrics.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-relp.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc.yaml create mode 100644 addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml create mode 100644 addons/postgresql-cluster/16/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-16c64g400/create-instance-schema.json create mode 100644 
addons/postgresql-cluster/16/plans/standard-16c64g400/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-2c4g20/create-instance-schema.json create mode 100644 addons/postgresql-cluster/16/plans/standard-2c4g20/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-2c8g50/create-instance-schema.json create mode 100644 addons/postgresql-cluster/16/plans/standard-2c8g50/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-32c128g800/create-instance-schema.json create mode 100644 addons/postgresql-cluster/16/plans/standard-32c128g800/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-32c64g4000/create-instance-schema.json create mode 100644 addons/postgresql-cluster/16/plans/standard-32c64g4000/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-4c16g100/create-instance-schema.json create mode 100644 addons/postgresql-cluster/16/plans/standard-4c16g100/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml create mode 100644 
addons/postgresql-cluster/16/plans/standard-8c32g200/create-instance-schema.json create mode 100644 addons/postgresql-cluster/16/plans/standard-8c32g200/meta.yaml create mode 100644 addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml diff --git a/.gitignore b/.gitignore index be9ec9db..4817d62e 100644 --- a/.gitignore +++ b/.gitignore @@ -47,4 +47,8 @@ Temporary Items toCopy/ out/ Chart.lock -*.tgz \ No newline at end of file +*.tgz + +*.fix +addons/grafana/10/dashborad/ +addons/prometheus/prom-value.yaml diff --git a/addons/index.yaml b/addons/index.yaml index 010b878c..128d481b 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -41,6 +41,8 @@ entries: postgresql-cluster: - version: 15 description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." + - version: 16 + description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." seaweedfs: - version: 3 description: "SeaweedFS is a fast distributed storage system for blobs, objects, files, and data lake, for billions of files." @@ -69,4 +71,7 @@ entries: description: "Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol." yugabytedb: - version: 2024 - description: "YugabyteDB is a high-performance distributed SQL database for powering global, internet-scale applications. " \ No newline at end of file + description: "YugabyteDB is a high-performance distributed SQL database for powering global, internet-scale applications. " + nessie: + - version: "0.103" + description: "Transactional Catalog for Data Lakes with Git-like semantics . 
" \ No newline at end of file diff --git a/addons/nessie/0.103/chart/nessie/Chart.yaml b/addons/nessie/0.103/chart/nessie/Chart.yaml new file mode 100644 index 00000000..e1ba8165 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +description: A Helm chart for Nessie +home: https://projectnessie.org/ +icon: https://raw.githubusercontent.com/projectnessie/nessie/main/site/docs/img/nessie.svg +keywords: +- nessie +- iceberg +- delta +- data lake +- transactional catalog +- git-like semantics +maintainers: +- name: nastra +- name: snazy +- name: dimas-b +- name: adutra +name: nessie +sources: +- https://github.com/projectnessie/nessie +type: application +version: 0.103.0 diff --git a/addons/nessie/0.103/chart/nessie/README.md b/addons/nessie/0.103/chart/nessie/README.md new file mode 100644 index 00000000..1b926af0 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/README.md @@ -0,0 +1,351 @@ + + +# Nessie Helm chart + +![Version: 0.103.0](https://img.shields.io/badge/Version-0.103.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) + +A Helm chart for Nessie. + +**Homepage:** + +## Maintainers +* [nastra](https://github.com/nastra) +* [snazy](https://github.com/snazy) +* [dimas-b](https://github.com/dimas-b) +* [adutra](https://github.com/adutra) + +## Source Code + +* + +## Documentation + +For users: see [Nessie on Kubernetes](https://projectnessie.org/try/kubernetes/) +for more information. + +For developers: to update this README file, e.g. when the chart is updated or when the template file +`README.md.gotmpl` is updated, install the [helm-docs](https://github.com/norwoodj/helm-docs) tool, +then run: + +```bash +helm-docs --chart-search-root=helm +``` + +Note: don't modify the README.md file directly, please modify `README.md.gotmpl` instead. 
+ +## Installation + +### From Helm repo +```bash +helm repo add nessie-helm https://charts.projectnessie.org +helm repo update +helm install --namespace nessie-ns nessie nessie-helm/nessie +``` + +### From local directory (for development purposes ONLY!) + +From Nessie repo root: + +```bash +helm install --namespace nessie-ns nessie helm/nessie +``` + +Beware that the local chart may contain changes that are not yet released. + +### Uninstalling the chart + +```bash +helm uninstall --namespace nessie-ns nessie +``` + +## Debugging, linting & testing locally + +To debug the rendering of Helm templates: + +```bash +helm template nessie -n nessie-ns helm/nessie --debug +``` + +You can also provide a values file: + +```bash +helm template nessie -n nessie-ns helm/nessie --values helm/nessie/ci/inmemory-values.yaml --debug +``` + +For linting and testing, the [chart-testing](https://github.com/helm/chart-testing) tool (`ct`) must +be installed. + +To lint the Helm chart, use `ct lint`: + +```bash +ct lint --charts helm/nessie +``` + +To test the charts against a local running minikube cluster, first create the namespace and apply the fixtures: + +```bash +kubectl create namespace nessie-ns +kubectl apply --namespace nessie-ns $(find helm/nessie/ci/fixtures -name "*.yaml" -exec echo -n "-f {} " \;) +``` + +Then run the tests with `ct install`: + +```bash +ct install --charts ./helm/nessie --namespace nessie-ns --debug +``` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| advancedConfig | object | `{}` | Advanced configuration. You can pass here any valid Nessie or Quarkus configuration property. Any property that is defined here takes precedence over all the other configuration values generated by this chart. Properties can be passed "flattened" or as nested YAML objects (see examples below). | +| affinity | object | `{}` | Affinity and anti-affinity for nessie pods. 
See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity. | +| authentication.enabled | bool | `false` | Specifies whether authentication for the nessie server should be enabled. | +| authentication.oidcAuthServerUrl | string | `nil` | Sets the base URL of the OpenID Connect (OIDC) server. Required if authentication is enabled (unless local token introspection is enforced through advanced configuration). | +| authentication.oidcClientId | string | `"nessie"` | Set the OIDC client ID. If Nessie must contact the OIDC server, this is the client ID that will be used to identify the application. | +| authentication.oidcClientSecret | object | `{}` | Set the OIDC client secret. Whether the client secret is required depends on the OIDC server configuration. For Keycloak, the client secret is generally not required as the returned tokens can be introspected locally by Nessie. If token introspection requires a round-trip to the OIDC server, the client secret is required. | +| authorization.enabled | bool | `false` | Specifies whether authorization for the nessie server should be enabled. | +| authorization.rules | object | `{}` | The authorization rules when authorization.enabled=true. Example rules can be found at https://projectnessie.org/features/metadata_authorization/#authorization-rules | +| autoscaling.enabled | bool | `false` | Specifies whether automatic horizontal scaling should be enabled. Do not enable this when using ROCKSDB version store type. | +| autoscaling.maxReplicas | int | `3` | The maximum number of replicas to maintain. | +| autoscaling.minReplicas | int | `1` | The minimum number of replicas to maintain. | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Optional; set to zero or empty to disable. | +| autoscaling.targetMemoryUtilizationPercentage | string | `nil` | Optional; set to zero or empty to disable. 
| +| bigtable.appProfileId | string | `"default"` | The Google Cloud Bigtable app profile ID. | +| bigtable.instanceId | string | `"nessie-bigtable"` | The Google Cloud Bigtable instance ID. | +| bigtable.projectId | string | `"my-gcp-project"` | The Google Cloud project ID. | +| bigtable.secret | object | `{}` | The secret to use to authenticate against BigTable. When provided, it is assumed that authentication will use a service account JSON key. See https://cloud.google.com/iam/docs/keys-create-delete for details on how to create a service account key. If left empty, then Workload Identity usage is assumed instead; in this case, make sure that the pod's service account has been granted access to BigTable. See https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#authenticating_to for details on how to create a suitable service account. Important: when using Workload Identity, unless the cluster is in Autopilot mode, it is also required to add the following nodeSelector label: iam.gke.io/gke-metadata-server-enabled: "true" This is not done automatically by this chart because this selector would be invalid for Autopilot clusters. | +| cassandra.contactPoints | string | `"cassandra.cassandra.svc.cluster.local:9042"` | The contact points for the Cassandra cluster. At least one contact point must be provided, but more can be added for redundancy. The format is a comma-separated list of host:port elements. | +| cassandra.keyspace | string | `"nessie"` | | +| cassandra.localDatacenter | string | `"datacenter1"` | | +| cassandra.secret.name | string | `"cassandra-creds"` | The secret name to pull Cassandra credentials from. | +| cassandra.secret.password | string | `"cassandra_password"` | The secret key storing the Cassandra password. | +| cassandra.secret.username | string | `"cassandra_username"` | The secret key storing the Cassandra username. 
| +| catalog | object | `{"enabled":false,"iceberg":{"configDefaults":{},"configOverrides":{},"defaultWarehouse":null,"objectStoresHealthCheckEnabled":true,"warehouses":[{"configDefaults":{},"configOverrides":{},"location":null,"name":null}]},"storage":{"adls":{"advancedConfig":{},"defaultOptions":{"accountSecret":{"accountKey":null,"accountName":null,"name":null},"authType":null,"endpoint":null,"externalEndpoint":null,"maxRetries":null,"maxRetryDelay":null,"retryDelay":null,"retryPolicy":null,"sasTokenSecret":{"name":null,"sasToken":null},"tryTimeout":null},"filesystems":[],"transport":{"connectTimeout":null,"connectionIdleTimeout":null,"maxHttpConnections":null,"readBlockSize":null,"readTimeout":null,"writeBlockSize":null,"writeTimeout":null}},"gcs":{"buckets":[],"defaultOptions":{"authCredentialsJsonSecret":{"key":null,"name":null},"authType":null,"clientLibToken":null,"decryptionKey":null,"deleteBatchSize":null,"encryptionKey":null,"externalHost":null,"host":null,"oauth2TokenSecret":{"expiresAt":null,"name":null,"token":null},"projectId":null,"quotaProjectId":null,"readChunkSize":null,"userProject":null,"writeChunkSize":null},"transport":{"connectTimeout":null,"initialRetryDelay":null,"initialRpcTimeout":null,"logicalTimeout":null,"maxAttempts":null,"maxRetryDelay":null,"maxRpcTimeout":null,"readTimeout":null,"retryDelayMultiplier":null,"rpcTimeoutMultiplier":null,"totalTimeout":null}},"retryAfter":null,"s3":{"buckets":[],"defaultOptions":{"accessKeySecret":{"awsAccessKeyId":null,"awsSecretAccessKey":null,"name":null},"accessPoint":null,"allowCrossRegionAccessPoint":null,"authType":null,"clientIam":{"enabled":null,"externalId":null,"policy":null,"roleArn":null,"roleSessionName":null,"sessionDuration":null,"statements":null},"endpoint":null,"externalEndpoint":null,"pathStyleAccess":null,"region":null,"requestSigningEnabled":null,"serverIam":{"enabled":null,"externalId":null,"policy":null,"roleArn":null,"roleSessionName":null,"sessionDuration":null},"stsEndpoint":
null},"sessionCredentials":{"sessionCredentialCacheMaxEntries":null,"sessionCredentialRefreshGracePeriod":null,"stsClientsCacheMaxEntries":null},"transport":{"connectTimeout":null,"connectionAcquisitionTimeout":null,"connectionMaxIdleTime":null,"connectionTimeToLive":null,"expectContinueEnabled":null,"maxHttpConnections":null,"readTimeout":null}}}}` | The Nessie catalog server configuration. | +| catalog.enabled | bool | `false` | Whether to enable the REST catalog service. | +| catalog.iceberg | object | `{"configDefaults":{},"configOverrides":{},"defaultWarehouse":null,"objectStoresHealthCheckEnabled":true,"warehouses":[{"configDefaults":{},"configOverrides":{},"location":null,"name":null}]}` | Iceberg catalog settings. | +| catalog.iceberg.configDefaults | object | `{}` | Iceberg config defaults applicable to all clients and warehouses. Any properties that are common to all iceberg clients should be included here. They will be passed to all clients on all warehouses as config defaults. These defaults can be overridden on a per-warehouse basis, see below. | +| catalog.iceberg.configOverrides | object | `{}` | Iceberg config overrides applicable to all clients and warehouses. Any properties that are common to all iceberg clients should be included here. They will be passed to all clients on all warehouses as config overrides. These overrides can be overridden on a per-warehouse basis, see below. | +| catalog.iceberg.defaultWarehouse | string | `nil` | The default warehouse name. Required. This is just a symbolic name; it must refer to a declared warehouse below. | +| catalog.iceberg.warehouses | list | `[{"configDefaults":{},"configOverrides":{},"location":null,"name":null}]` | Iceberg warehouses. Each warehouse is a location where Iceberg tables are stored. Each warehouse has a name, a location, and optional config defaults and overrides. At least one warehouse must be defined. 
| +| catalog.iceberg.warehouses[0] | object | `{"configDefaults":{},"configOverrides":{},"location":null,"name":null}` | Symbolic name of the warehouse. Required. | +| catalog.iceberg.warehouses[0].configDefaults | object | `{}` | Iceberg config defaults specific to this warehouse. They override any defaults specified above in catalog.iceberg.configDefaults. | +| catalog.iceberg.warehouses[0].configOverrides | object | `{}` | Iceberg config overrides specific to this warehouse. They override any defaults specified above in catalog.iceberg.configOverrides. | +| catalog.iceberg.warehouses[0].location | string | `nil` | Location of the warehouse. Required. Used to determine the base location of a table. Scheme must be either s3 (Amazon S3), gs (Google GCS) or abfs / abfss (Azure ADLS). Storage properties for each location can be defined below. | +| catalog.storage | object | `{"adls":{"advancedConfig":{},"defaultOptions":{"accountSecret":{"accountKey":null,"accountName":null,"name":null},"authType":null,"endpoint":null,"externalEndpoint":null,"maxRetries":null,"maxRetryDelay":null,"retryDelay":null,"retryPolicy":null,"sasTokenSecret":{"name":null,"sasToken":null},"tryTimeout":null},"filesystems":[],"transport":{"connectTimeout":null,"connectionIdleTimeout":null,"maxHttpConnections":null,"readBlockSize":null,"readTimeout":null,"writeBlockSize":null,"writeTimeout":null}},"gcs":{"buckets":[],"defaultOptions":{"authCredentialsJsonSecret":{"key":null,"name":null},"authType":null,"clientLibToken":null,"decryptionKey":null,"deleteBatchSize":null,"encryptionKey":null,"externalHost":null,"host":null,"oauth2TokenSecret":{"expiresAt":null,"name":null,"token":null},"projectId":null,"quotaProjectId":null,"readChunkSize":null,"userProject":null,"writeChunkSize":null},"transport":{"connectTimeout":null,"initialRetryDelay":null,"initialRpcTimeout":null,"logicalTimeout":null,"maxAttempts":null,"maxRetryDelay":null,"maxRpcTimeout":null,"readTimeout":null,"retryDelayMultiplier":null,"rpc
TimeoutMultiplier":null,"totalTimeout":null}},"retryAfter":null,"s3":{"buckets":[],"defaultOptions":{"accessKeySecret":{"awsAccessKeyId":null,"awsSecretAccessKey":null,"name":null},"accessPoint":null,"allowCrossRegionAccessPoint":null,"authType":null,"clientIam":{"enabled":null,"externalId":null,"policy":null,"roleArn":null,"roleSessionName":null,"sessionDuration":null,"statements":null},"endpoint":null,"externalEndpoint":null,"pathStyleAccess":null,"region":null,"requestSigningEnabled":null,"serverIam":{"enabled":null,"externalId":null,"policy":null,"roleArn":null,"roleSessionName":null,"sessionDuration":null},"stsEndpoint":null},"sessionCredentials":{"sessionCredentialCacheMaxEntries":null,"sessionCredentialRefreshGracePeriod":null,"stsClientsCacheMaxEntries":null},"transport":{"connectTimeout":null,"connectionAcquisitionTimeout":null,"connectionMaxIdleTime":null,"connectionTimeToLive":null,"expectContinueEnabled":null,"maxHttpConnections":null,"readTimeout":null}}}` | Catalog storage settings. | +| catalog.storage.adls.advancedConfig | object | `{}` | Custom ADLS configuration options, see javadocs of com.azure.core.util.Configuration. Not overridable on a per-filesystem basis. | +| catalog.storage.adls.defaultOptions.accountSecret | object | `{"accountKey":null,"accountName":null,"name":null}` | A secret containing the account name and key to use. Required when authType is STORAGE_SHARED_KEY. | +| catalog.storage.adls.defaultOptions.accountSecret.accountKey | string | `nil` | Secret key containing the account key. | +| catalog.storage.adls.defaultOptions.accountSecret.accountName | string | `nil` | Secret key containing the fully-qualified account name, e.g. "myaccount.dfs.core.windows.net". | +| catalog.storage.adls.defaultOptions.accountSecret.name | string | `nil` | Name of the secret containing the account name and key. | +| catalog.storage.adls.defaultOptions.authType | string | `nil` | The authentication type to use. 
Valid values are: NONE, STORAGE_SHARED_KEY, SAS_TOKEN, APPLICATION_DEFAULT. The default is NONE. | +| catalog.storage.adls.defaultOptions.endpoint | string | `nil` | Custom HTTP endpoint. In case clients need to use a different URI, use externalEndpoint. | +| catalog.storage.adls.defaultOptions.externalEndpoint | string | `nil` | Custom HTTP endpoint to be used by clients. If not set, the endpoint value is used. | +| catalog.storage.adls.defaultOptions.maxRetries | string | `nil` | The maximum number of retries. Must be a positive integer. Default is 4. Optional. Valid if retryPolicy is EXPONENTIAL_BACKOFF or FIXED_DELAY. | +| catalog.storage.adls.defaultOptions.maxRetryDelay | string | `nil` | Specifies the maximum delay allowed before retrying an operation, default value is PT120s (120 seconds). Must be a valid ISO duration. Valid if retryPolicy is EXPONENTIAL_BACKOFF. | +| catalog.storage.adls.defaultOptions.retryDelay | string | `nil` | Specifies the amount of delay to use before retrying an operation, default value is PT4S (4 seconds) when retryPolicy is EXPONENTIAL_BACKOFF and PT30S (30 seconds) when retryPolicy is FIXED_DELAY. Must be a valid ISO duration. | +| catalog.storage.adls.defaultOptions.retryPolicy | string | `nil` | The retry strategy to use. Valid values are: NONE, EXPONENTIAL_BACKOFF, FIXED_DELAY. The default is EXPONENTIAL_BACKOFF. | +| catalog.storage.adls.defaultOptions.sasTokenSecret | object | `{"name":null,"sasToken":null}` | A secret containing the SAS token to use. Required when authType is SAS_TOKEN. | +| catalog.storage.adls.defaultOptions.sasTokenSecret.name | string | `nil` | Name of the secret containing the SAS token. | +| catalog.storage.adls.defaultOptions.sasTokenSecret.sasToken | string | `nil` | Secret key containing the SAS token. | +| catalog.storage.adls.defaultOptions.tryTimeout | string | `nil` | The maximum time allowed before a request is cancelled and assumed failed, default is Integer.MAX_VALUE. Optional. 
Must be a valid ISO duration. Valid if retryPolicy is EXPONENTIAL_BACKOFF or FIXED_DELAY. | +| catalog.storage.adls.filesystems | list | `[]` | Per-filesystem ADLS settings. Override the general settings above. | +| catalog.storage.adls.transport | object | `{"connectTimeout":null,"connectionIdleTimeout":null,"maxHttpConnections":null,"readBlockSize":null,"readTimeout":null,"writeBlockSize":null,"writeTimeout":null}` | ADLS transport settings. Not overridable on a per-bucket basis. | +| catalog.storage.adls.transport.connectTimeout | string | `nil` | Sets the connection timeout for a request to be sent. The default is PT10S (10 seconds). Must be a valid ISO duration. Not overridable on a per-filesystem basis. | +| catalog.storage.adls.transport.connectionIdleTimeout | string | `nil` | Sets the maximum idle time for a connection to be kept alive. The default is PT60S (60 seconds). Must be a valid ISO duration. Not overridable on a per-filesystem basis. | +| catalog.storage.adls.transport.maxHttpConnections | string | `nil` | The default maximum connection pool size is determined by the underlying HTTP client. Not overridable on a per-filesystem basis. | +| catalog.storage.adls.transport.readBlockSize | string | `nil` | The size of each data chunk returned from the service in bytes. The default value is 4 MB. Not overridable on a per-filesystem basis. | +| catalog.storage.adls.transport.readTimeout | string | `nil` | Sets the read timeout duration used when reading the server response. The default is PT60S (60 seconds). Must be a valid ISO duration. Not overridable on a per-filesystem basis. | +| catalog.storage.adls.transport.writeBlockSize | string | `nil` | Sets the block size in bytes to transfer at a time. Not overridable on a per-filesystem basis. | +| catalog.storage.adls.transport.writeTimeout | string | `nil` | Sets the write timeout duration used when writing the request to the server. The default is PT60S (60 seconds). Must be a valid ISO duration. 
Not overridable on a per-filesystem basis. | +| catalog.storage.gcs.buckets | list | `[]` | Per-bucket GCS settings. Override the general settings above. | +| catalog.storage.gcs.defaultOptions.authCredentialsJsonSecret | object | `{"key":null,"name":null}` | The Google Cloud service account key secret. This is required when authType is USER or SERVICE_ACCOUNT. | +| catalog.storage.gcs.defaultOptions.authCredentialsJsonSecret.key | string | `nil` | The secret key storing the Google Cloud service account JSON key. | +| catalog.storage.gcs.defaultOptions.authCredentialsJsonSecret.name | string | `nil` | The secret name to pull a valid Google Cloud service account key from. | +| catalog.storage.gcs.defaultOptions.authType | string | `nil` | The authentication type to use. Valid values are: NONE, USER, SERVICE_ACCOUNT, ACCESS_TOKEN, APPLICATION_DEFAULT. The default is NONE. | +| catalog.storage.gcs.defaultOptions.clientLibToken | string | `nil` | The Google client lib token. | +| catalog.storage.gcs.defaultOptions.decryptionKey | string | `nil` | Customer-supplied AES256 key for blob decryption when reading. Currently unsupported. | +| catalog.storage.gcs.defaultOptions.deleteBatchSize | string | `nil` | The delete batch size. | +| catalog.storage.gcs.defaultOptions.encryptionKey | string | `nil` | Customer-supplied AES256 key for blob encryption when writing. Currently unsupported. | +| catalog.storage.gcs.defaultOptions.externalHost | string | `nil` | When using a specific endpoint, see host, and the endpoint URIs for the Nessie server differ, you can specify the URI passed down to clients using this setting. Otherwise, clients will receive the value from the host setting. | +| catalog.storage.gcs.defaultOptions.host | string | `nil` | The default endpoint override to use. The endpoint is almost always used for testing purposes. If the endpoint URIs for the Nessie server and clients differ, this one defines the endpoint used for the Nessie server. 
| +| catalog.storage.gcs.defaultOptions.oauth2TokenSecret | object | `{"expiresAt":null,"name":null,"token":null}` | The oauth2 token secret. This is required when authType is ACCESS_TOKEN. | +| catalog.storage.gcs.defaultOptions.projectId | string | `nil` | The Google project ID. | +| catalog.storage.gcs.defaultOptions.quotaProjectId | string | `nil` | The Google quota project ID. | +| catalog.storage.gcs.defaultOptions.readChunkSize | string | `nil` | The read chunk size in bytes. | +| catalog.storage.gcs.defaultOptions.userProject | string | `nil` | Optionally specify the user project (Google term). | +| catalog.storage.gcs.defaultOptions.writeChunkSize | string | `nil` | The write chunk size in bytes. | +| catalog.storage.gcs.transport | object | `{"connectTimeout":null,"initialRetryDelay":null,"initialRpcTimeout":null,"logicalTimeout":null,"maxAttempts":null,"maxRetryDelay":null,"maxRpcTimeout":null,"readTimeout":null,"retryDelayMultiplier":null,"rpcTimeoutMultiplier":null,"totalTimeout":null}` | GCS transport settings. Not overridable on a per-bucket basis. | +| catalog.storage.gcs.transport.connectTimeout | string | `nil` | Override the default connection timeout. Must be a valid ISO duration. | +| catalog.storage.gcs.transport.initialRetryDelay | string | `nil` | Override the default initial retry delay. Must be a valid ISO duration. | +| catalog.storage.gcs.transport.initialRpcTimeout | string | `nil` | Override the default initial RPC timeout. Must be a valid ISO duration. | +| catalog.storage.gcs.transport.logicalTimeout | string | `nil` | Override the default logical request timeout. Must be a valid ISO duration. | +| catalog.storage.gcs.transport.maxAttempts | string | `nil` | Override the default maximum number of attempts. | +| catalog.storage.gcs.transport.maxRetryDelay | string | `nil` | Override the default maximum retry delay. Must be a valid ISO duration. 
| +| catalog.storage.gcs.transport.maxRpcTimeout | string | `nil` | Override the default maximum RPC timeout. Must be a valid ISO duration. | +| catalog.storage.gcs.transport.readTimeout | string | `nil` | Override the default read timeout. Must be a valid ISO duration. | +| catalog.storage.gcs.transport.retryDelayMultiplier | string | `nil` | Override the default retry delay multiplier. | +| catalog.storage.gcs.transport.rpcTimeoutMultiplier | string | `nil` | Override the default RPC timeout multiplier. | +| catalog.storage.gcs.transport.totalTimeout | string | `nil` | Override the default total timeout. Must be a valid ISO duration. | +| catalog.storage.retryAfter | string | `nil` | Interval after which a request is retried when Storage responds with some "retry later" error. Must be a valid ISO duration. | +| catalog.storage.s3.buckets | list | `[]` | Per-bucket S3 settings. Override the general settings above. | +| catalog.storage.s3.defaultOptions.accessKeySecret | object | `{"awsAccessKeyId":null,"awsSecretAccessKey":null,"name":null}` | AWS credentials. Required when serverAuthenticationMode is STATIC. | +| catalog.storage.s3.defaultOptions.accessKeySecret.awsAccessKeyId | string | `nil` | The secret key storing the AWS access key id. | +| catalog.storage.s3.defaultOptions.accessKeySecret.awsSecretAccessKey | string | `nil` | The secret key storing the AWS secret access key. | +| catalog.storage.s3.defaultOptions.accessKeySecret.name | string | `nil` | The secret name to pull AWS credentials from. | +| catalog.storage.s3.defaultOptions.accessPoint | string | `nil` | AWS Access point for this bucket. Access points can be used to perform S3 operations by specifying a mapping of bucket to access points. This is useful for multi-region access, cross-region access, disaster recovery, etc. See https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html. 
| +| catalog.storage.s3.defaultOptions.allowCrossRegionAccessPoint | string | `nil` | Authorize cross-region calls when contacting an access point. The default is false. | +| catalog.storage.s3.defaultOptions.authType | string | `nil` | Controls the authentication mode for the Catalog server. Valid values are: - APPLICATION_GLOBAL: Use the default AWS credentials provider chain. - STATIC: Static credentials provided through the accessKeySecret option. The default is STATIC. | +| catalog.storage.s3.defaultOptions.clientIam.enabled | string | `nil` | Whether to enable vended credentials functionality. If this option is enabled, the server will temporarily assume the configured role, then pass the returned session credentials down to the client, for each table that is created, updated or registered. Vended credentials are not cached server-side. | +| catalog.storage.s3.defaultOptions.clientIam.externalId | string | `nil` | An identifier for the party assuming the role. This parameter must match the external ID configured in IAM rules that govern the assume role process for the specified roleArn. | +| catalog.storage.s3.defaultOptions.clientIam.policy | string | `nil` | The IAM policy in JSON format to be used as an inline session policy when calling the assume-role endpoint. Optional. | +| catalog.storage.s3.defaultOptions.clientIam.roleArn | string | `nil` | The ARN of the role to assume for accessing S3 data. This parameter is required for Amazon S3, but may not be required for other storage providers (e.g. Minio does not use it at all). | +| catalog.storage.s3.defaultOptions.clientIam.roleSessionName | string | `nil` | An identifier for the assumed role session. This parameter is most important in cases when the same role is assumed by different principals in different use cases. | +| catalog.storage.s3.defaultOptions.clientIam.sessionDuration | string | `nil` | A higher bound estimate of the expected duration of client "sessions" working with data in this bucket. 
A session, for example, is the lifetime of an Iceberg REST catalog object on the client side. This value is used for validating expiration times of credentials associated with the warehouse. If unset, a default of one hour is assumed. | +| catalog.storage.s3.defaultOptions.clientIam.statements | string | `nil` | Additional IAM policy statements in JSON format to add to generated per-table IAM policies. | +| catalog.storage.s3.defaultOptions.endpoint | string | `nil` | Endpoint URI, required for private clouds. Optional; if not provided, the default is used. | +| catalog.storage.s3.defaultOptions.externalEndpoint | string | `nil` | Endpoint URI, required for private clouds. Optional; if not provided, the default is used. If the endpoint URIs for the Nessie server and clients differ, this one defines the endpoint used for clients. | +| catalog.storage.s3.defaultOptions.pathStyleAccess | string | `nil` | Whether to use path-style access. Optional; if not provided, the default is used. If true, path-style access will be used, as in: `https://<domain>/<bucket>`. If false, a virtual-hosted style will be used instead, as in: `https://<bucket>.<domain>`. | +| catalog.storage.s3.defaultOptions.region | string | `nil` | DNS name of the region, required for AWS. | +| catalog.storage.s3.defaultOptions.requestSigningEnabled | string | `nil` | Optional parameter to disable S3 request signing. Default is to enable S3 request signing. | +| catalog.storage.s3.defaultOptions.serverIam | object | `{"enabled":null,"externalId":null,"policy":null,"roleArn":null,"roleSessionName":null,"sessionDuration":null}` | Settings only relevant when clientAuthenticationMode is ASSUME_ROLE. | +| catalog.storage.s3.defaultOptions.serverIam.enabled | string | `nil` | Whether to enable server assume-role functionality. If this option is enabled, the server will attempt to assume the configured role at startup and cache the returned session credentials. 
| +| catalog.storage.s3.defaultOptions.serverIam.externalId | string | `nil` | An identifier for the party assuming the role. This parameter must match the external ID configured in IAM rules that govern the assume role process for the specified roleArn. | +| catalog.storage.s3.defaultOptions.serverIam.policy | string | `nil` | The IAM policy in JSON format to be used as an inline session policy when calling the assume-role endpoint. Optional. | +| catalog.storage.s3.defaultOptions.serverIam.roleArn | string | `nil` | The ARN of the role to assume for accessing S3 data. This parameter is required for Amazon S3, but may not be required for other storage providers (e.g. Minio does not use it at all). | +| catalog.storage.s3.defaultOptions.serverIam.roleSessionName | string | `nil` | An identifier for the assumed role session. This parameter is most important in cases when the same role is assumed by different principals in different use cases. | +| catalog.storage.s3.defaultOptions.serverIam.sessionDuration | string | `nil` | A higher bound estimate of the expected duration of client "sessions" working with data in this bucket. A session, for example, is the lifetime of an Iceberg REST catalog object on the client side. This value is used for validating expiration times of credentials associated with the warehouse. If unset, a default of one hour is assumed. | +| catalog.storage.s3.defaultOptions.stsEndpoint | string | `nil` | The STS endpoint. Optional; if not provided, the default is used. This parameter must be set if the cloud provider is not AMAZON and the catalog is configured to use S3 sessions (e.g. to use the "assume role" functionality). | +| catalog.storage.s3.sessionCredentials.sessionCredentialCacheMaxEntries | string | `nil` | Maximum number of entries to keep in the session credentials cache (assumed role credentials). Not overridable on a per-bucket basis. The default is 1000. 
| +| catalog.storage.s3.sessionCredentials.sessionCredentialRefreshGracePeriod | string | `nil` | The time period to subtract from the S3 session credentials (assumed role credentials) expiry time to define the time when those credentials become eligible for refreshing. Not overridable on a per-bucket basis. The default is PT5M (5 minutes). | +| catalog.storage.s3.sessionCredentials.stsClientsCacheMaxEntries | string | `nil` | Maximum number of entries to keep in the STS clients cache. Not overridable on a per-bucket basis. The default is 50. | +| catalog.storage.s3.transport | object | `{"connectTimeout":null,"connectionAcquisitionTimeout":null,"connectionMaxIdleTime":null,"connectionTimeToLive":null,"expectContinueEnabled":null,"maxHttpConnections":null,"readTimeout":null}` | S3 transport settings. Not overridable on a per-bucket basis. | +| catalog.storage.s3.transport.connectTimeout | string | `nil` | Override the default TCP connect timeout. Must be a valid ISO duration. | +| catalog.storage.s3.transport.connectionAcquisitionTimeout | string | `nil` | Override default connection acquisition timeout. This is the time a request will wait for a connection from the pool. Must be a valid ISO duration. | +| catalog.storage.s3.transport.connectionMaxIdleTime | string | `nil` | Override default max idle time of a pooled connection. Must be a valid ISO duration. | +| catalog.storage.s3.transport.connectionTimeToLive | string | `nil` | Override default time-to-live of a pooled connection. Must be a valid ISO duration. | +| catalog.storage.s3.transport.expectContinueEnabled | string | `nil` | Override default behavior whether to expect an HTTP/100-Continue. | +| catalog.storage.s3.transport.maxHttpConnections | string | `nil` | Override the default maximum number of pooled connections. | +| catalog.storage.s3.transport.readTimeout | string | `nil` | Override the default connection read timeout. Must be a valid ISO duration. 
| +| configMapLabels | object | `{}` | Additional Labels to apply to nessie configmap. | +| deploymentStrategy | object | `{}` | Override the strategy for nessie deployment. Valid values for type are: RollingUpdate and Recreate. If you are using the ROCKSDB version store type then you should use Recreate. Max Surge will allow new pods to be created before old ones are culled. Do not enable this when using ROCKSDB version store type. Max Unavailable will allow old pods to be culled before replacements are created See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy | +| dynamodb.profile | string | `"default"` | The name of the profile that should be used, when loading AWS credentials from a profile file. Required only if no secret is provided below. | +| dynamodb.region | string | `"us-west-2"` | The AWS region to use. | +| dynamodb.secret.awsAccessKeyId | string | `"aws_access_key_id"` | The secret key storing the AWS secret key id. | +| dynamodb.secret.awsSecretAccessKey | string | `"aws_secret_access_key"` | The secret key storing the AWS secret access key. | +| dynamodb.secret.name | string | `"awscreds"` | The secret name to pull AWS credentials from. Optional; if not present, the default AWS credentials provider chain is used. | +| extraEnv | list | `[]` | Advanced configuration via Environment Variables. Extra environment variables to add to the Nessie server container. You can pass here any valid EnvVar object: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#envvar-v1-core This can be useful to get configuration values from Kubernetes secrets or config maps. | +| extraInitContainers | list | `[]` | Add additional init containers to the nessie pod(s) See https://kubernetes.io/docs/concepts/workloads/pods/init-containers/. | +| extraServices | list | `[]` | Additional service definitions. All service definitions always select all Nessie pods. 
Use this if you need to expose specific ports with different configurations. | +| extraVolumeMounts | list | `[]` | Extra volume mounts to add to the nessie container. See https://kubernetes.io/docs/concepts/storage/volumes/. | +| extraVolumes | list | `[]` | Extra volumes to add to the nessie pod. See https://kubernetes.io/docs/concepts/storage/volumes/. | +| image.configDir | string | `"/deployments/config"` | The path to the directory where the application.properties file should be mounted. | +| image.pullPolicy | string | `"IfNotPresent"` | The image pull policy. | +| image.repository | string | `"ghcr.io/projectnessie/nessie"` | The image repository to pull from. | +| image.tag | string | `""` | Overrides the image tag whose default is the chart version. | +| imagePullSecrets | list | `[]` | References to secrets in the same namespace to use for pulling any of the images used by this chart. Each entry is a LocalObjectReference to an existing secret in the namespace. The secret must contain a .dockerconfigjson key with a base64-encoded Docker configuration file. See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ for more information. | +| ingress | object | `{"annotations":{},"className":"","enabled":false,"hosts":[{"host":"chart-example.local","paths":[],"service":{"nameSuffix":"","portName":"nessie-http"}}],"pathType":"ImplementationSpecific","tls":[]}` | Nessie Ingress settings. These settings generate an Ingress resource that routes external traffic to the Nessie service. Consider enabling sticky sessions based on the remote client's IP address; this is generally beneficial to Nessie deployments, but some testing may be required in order to make sure that the load is distributed evenly among the pods. Check your ingress controller's documentation. | +| ingress.annotations | object | `{}` | Annotations to add to the ingress. 
| +| ingress.className | string | `""` | Specifies the ingressClassName; leave empty if you don't want to customize it. | +| ingress.enabled | bool | `false` | Specifies whether an ingress should be created. | +| ingress.hosts | list | `[{"host":"chart-example.local","paths":[],"service":{"nameSuffix":"","portName":"nessie-http"}}]` | A list of host paths used to configure the ingress. | +| ingress.hosts[0].service | object | `{"nameSuffix":"","portName":"nessie-http"}` | The service target for the ingress. | +| ingress.hosts[0].service.nameSuffix | string | `""` | The target service name suffix. Optional; if not provided, the main service will be targeted. Change this only if you are targeting a service defined in extraServices. | +| ingress.hosts[0].service.portName | string | `"nessie-http"` | The port name to route traffic to. Must match one of the ports in service.ports or in extraServices.ports. Optional; if not provided, the first port in service.ports will be used. | +| ingress.pathType | string | `"ImplementationSpecific"` | Specifies the path type of host paths. Valid values are: "Prefix", "Exact" or "ImplementationSpecific". | +| ingress.tls | list | `[]` | A list of TLS certificates; each entry has a list of hosts in the certificate, along with the secret name used to terminate TLS traffic on port 443. | +| jdbc.jdbcUrl | string | `"jdbc:postgresql://localhost:5432/my_database?currentSchema=nessie"` | The JDBC connection string. If you are using Nessie OSS images, then only PostgreSQL, MariaDB and MySQL URLs are supported. Check your JDBC driver documentation for the correct URL format. | +| jdbc.secret.name | string | `"datasource-creds"` | The secret name to pull datasource credentials from. | +| jdbc.secret.password | string | `"password"` | The secret key storing the datasource password. | +| jdbc.secret.username | string | `"username"` | The secret key storing the datasource username. 
| +| livenessProbe | object | `{"failureThreshold":3,"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"terminationGracePeriodSeconds":30,"timeoutSeconds":10}` | Configures the liveness probe for nessie pods. | +| livenessProbe.failureThreshold | int | `3` | Minimum consecutive failures for the probe to be considered failed after having succeeded. Minimum value is 1. | +| livenessProbe.initialDelaySeconds | int | `5` | Number of seconds after the container has started before liveness probes are initiated. Minimum value is 0. | +| livenessProbe.periodSeconds | int | `10` | How often (in seconds) to perform the probe. Minimum value is 1. | +| livenessProbe.successThreshold | int | `1` | Minimum consecutive successes for the probe to be considered successful after having failed. Minimum value is 1. | +| livenessProbe.terminationGracePeriodSeconds | int | `30` | Optional duration in seconds the pod needs to terminate gracefully upon probe failure. Minimum value is 1. | +| livenessProbe.timeoutSeconds | int | `10` | Number of seconds after which the probe times out. Minimum value is 1. | +| log | object | `{"categories":{"org.projectnessie":"INFO"},"console":{"enabled":true,"format":"%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%c{3.}] (%t) %s%e%n","json":false,"threshold":"ALL"},"file":{"enabled":false,"fileName":"nessie.log","format":"%d{yyyy-MM-dd HH:mm:ss,SSS} %h %N[%i] %-5p [%X{traceId},%X{spanId},%X{sampled}] [%c{3.}] (%t) %s%e%n","json":false,"logsDir":"/deployments/logs","rotation":{"fileSuffix":null,"maxBackupIndex":5,"maxFileSize":"100Mi"},"storage":{"className":"standard","selectorLabels":{},"size":"512Gi"},"threshold":"ALL"},"level":"INFO","sentry":{"dsn":null,"enabled":false,"environment":null,"inAppPackages":["org.projectnessie"],"level":"ERROR","release":null}}` | Logging configuration. | +| log.categories | object | `{"org.projectnessie":"INFO"}` | Configuration for specific log categories. 
| +| log.console | object | `{"enabled":true,"format":"%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%c{3.}] (%t) %s%e%n","json":false,"threshold":"ALL"}` | Configuration for the console appender. | +| log.console.enabled | bool | `true` | Whether to enable the console appender. | +| log.console.format | string | `"%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%c{3.}] (%t) %s%e%n"` | The log format to use. Ignored if JSON format is enabled. See https://quarkus.io/guides/logging#logging-format for details. | +| log.console.json | bool | `false` | Whether to log in JSON format. | +| log.console.threshold | string | `"ALL"` | The log level of the console appender. | +| log.file | object | `{"enabled":false,"fileName":"nessie.log","format":"%d{yyyy-MM-dd HH:mm:ss,SSS} %h %N[%i] %-5p [%X{traceId},%X{spanId},%X{sampled}] [%c{3.}] (%t) %s%e%n","json":false,"logsDir":"/deployments/logs","rotation":{"fileSuffix":null,"maxBackupIndex":5,"maxFileSize":"100Mi"},"storage":{"className":"standard","selectorLabels":{},"size":"512Gi"},"threshold":"ALL"}` | Configuration for the file appender. | +| log.file.enabled | bool | `false` | Whether to enable the file appender. | +| log.file.fileName | string | `"nessie.log"` | The log file name. | +| log.file.format | string | `"%d{yyyy-MM-dd HH:mm:ss,SSS} %h %N[%i] %-5p [%X{traceId},%X{spanId},%X{sampled}] [%c{3.}] (%t) %s%e%n"` | The log format to use. Ignored if JSON format is enabled. See https://quarkus.io/guides/logging#logging-format for details. | +| log.file.json | bool | `false` | Whether to log in JSON format. | +| log.file.logsDir | string | `"/deployments/logs"` | The local directory where log files are stored. The persistent volume claim will be mounted here. | +| log.file.rotation | object | `{"fileSuffix":null,"maxBackupIndex":5,"maxFileSize":"100Mi"}` | Log rotation configuration. | +| log.file.rotation.fileSuffix | string | `nil` | An optional suffix to append to the rotated log files. 
If present, the rotated log files will be grouped in time buckets, and each bucket will contain at most maxBackupIndex files. The suffix must be in a date-time format that is understood by DateTimeFormatter. If the suffix ends with .gz or .zip, the rotated files will also be compressed using the corresponding algorithm. | +| log.file.rotation.maxBackupIndex | int | `5` | The maximum number of backup files to keep. | +| log.file.rotation.maxFileSize | string | `"100Mi"` | The maximum size of the log file before it is rotated. Should be expressed as a Kubernetes quantity. | +| log.file.storage | object | `{"className":"standard","selectorLabels":{},"size":"512Gi"}` | The log storage configuration. A persistent volume claim will be created using these settings. | +| log.file.storage.className | string | `"standard"` | The storage class name of the persistent volume claim to create. | +| log.file.storage.selectorLabels | object | `{}` | Labels to add to the persistent volume claim spec selector; a persistent volume with matching labels must exist. Leave empty if using dynamic provisioning. | +| log.file.storage.size | string | `"512Gi"` | The size of the persistent volume claim to create. | +| log.file.threshold | string | `"ALL"` | The log level of the file appender. | +| log.level | string | `"INFO"` | The log level of the root category, which is used as the default log level for all categories. | +| log.sentry | object | `{"dsn":null,"enabled":false,"environment":null,"inAppPackages":["org.projectnessie"],"level":"ERROR","release":null}` | Configuration for the Sentry appender. See https://sentry.io and https://docs.quarkiverse.io/quarkus-logging-sentry/dev for more information. | +| log.sentry.dsn | string | `nil` | The Sentry DSN. Required. | +| log.sentry.enabled | bool | `false` | Whether to enable the Sentry appender. | +| log.sentry.environment | string | `nil` | The environment to report to Sentry. Optional. 
| +| log.sentry.inAppPackages | list | `["org.projectnessie"]` | Package prefixes that belong to your application. | +| log.sentry.level | string | `"ERROR"` | The log level of the Sentry appender. | +| log.sentry.release | string | `nil` | The release version to report to Sentry. Optional. | +| managementService | object | `{"annotations":{},"portName":"nessie-mgmt","portNumber":9000}` | Management service settings. These settings are used to configure liveness and readiness probes, and to configure the dedicated headless service that will expose health checks and metrics, e.g. for metrics scraping and service monitoring. | +| managementService.annotations | object | `{}` | Annotations to add to the service. | +| managementService.portName | string | `"nessie-mgmt"` | The name of the management port. Required. | +| managementService.portNumber | int | `9000` | The port the management service listens on. By default, the management interface is exposed on HTTP port 9000. | +| metrics.enabled | bool | `true` | Specifies whether metrics for the nessie server should be enabled. | +| metrics.tags | object | `{}` | Additional tags (dimensional labels) to add to the metrics. | +| mongodb.connectionString | string | `"mongodb://localhost:27017"` | The MongoDB connection string. | +| mongodb.name | string | `"nessie"` | The MongoDB database name. | +| mongodb.secret.name | string | `"mongodb-creds"` | The secret name to pull MongoDB credentials from. | +| mongodb.secret.password | string | `"mongodb_password"` | The secret key storing the MongoDB password. | +| mongodb.secret.username | string | `"mongodb_username"` | The secret key storing the MongoDB username. | +| nodeSelector | object | `{}` | Node labels which must match for the nessie pod to be scheduled on that node. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector. | +| podAnnotations | object | `{}` | Annotations to apply to nessie pods. 
| +| podLabels | object | `{}` | Additional Labels to apply to nessie pods. | +| podSecurityContext | object | `{"fsGroup":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the nessie pod. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/. | +| readinessProbe | object | `{"failureThreshold":3,"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":10}` | Configures the readiness probe for nessie pods. | +| readinessProbe.failureThreshold | int | `3` | Minimum consecutive failures for the probe to be considered failed after having succeeded. Minimum value is 1. | +| readinessProbe.initialDelaySeconds | int | `5` | Number of seconds after the container has started before readiness probes are initiated. Minimum value is 0. | +| readinessProbe.periodSeconds | int | `10` | How often (in seconds) to perform the probe. Minimum value is 1. | +| readinessProbe.successThreshold | int | `1` | Minimum consecutive successes for the probe to be considered successful after having failed. Minimum value is 1. | +| readinessProbe.timeoutSeconds | int | `10` | Number of seconds after which the probe times out. Minimum value is 1. | +| replicaCount | int | `1` | The number of replicas to deploy (horizontal scaling). Beware that replicas are stateless; don't set this number > 1 when using IN_MEMORY or ROCKSDB version store types. | +| resources | object | `{}` | Configures the resources requests and limits for nessie pods. We usually recommend not to specify default resources and to leave this as a conscious choice for the user. This also increases chances charts run on environments with little resources, such as Minikube. If you do want to specify resources, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
| +| rocksdb.selectorLabels | object | `{}` | Labels to add to the persistent volume claim spec selector; a persistent volume with matching labels must exist. Leave empty if using dynamic provisioning. | +| rocksdb.storageClassName | string | `"standard"` | The storage class name of the persistent volume claim to create. | +| rocksdb.storageSize | string | `"1Gi"` | The size of the persistent volume claim to create. | +| securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":10001,"runAsNonRoot":true,"runAsUser":10000}` | Security context for the nessie container. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/. | +| service | object | `{"annotations":{},"clusterIP":"","externalTrafficPolicy":"Cluster","internalTrafficPolicy":"Cluster","ports":[{"name":"nessie-http","number":19120}],"sessionAffinity":"None","trafficDistribution":"PreferClose","type":"ClusterIP"}` | Nessie main service settings. | +| service.annotations | object | `{}` | Annotations to add to the service. | +| service.clusterIP | string | `""` | You can specify your own cluster IP address If you define a Service that has the .spec.clusterIP set to "None" then Kubernetes does not assign an IP address. Instead, DNS records for the service will return the IP addresses of each pod targeted by the server. This is called a headless service. See https://kubernetes.io/docs/concepts/services-networking/service/#headless-services | +| service.internalTrafficPolicy | string | `"Cluster"` | The traffic policy fields control how traffic from internal and external sources are routed respectively. Valid values are Cluster and Local. Set the field to Cluster to route traffic to all ready endpoints. Set the field to Local to only route to ready node-local endpoints. 
If the traffic policy is Local and there are no node-local endpoints, traffic is dropped by kube-proxy | +| service.ports | list | `[{"name":"nessie-http","number":19120}]` | The ports the service will listen on. At least one port is required; the first port implicitly becomes the HTTP port that the application will use for serving API requests. By default, it's 19120. Note: port names must be unique and no more than 15 characters long. | +| service.sessionAffinity | string | `"None"` | The session affinity for the service. Valid values are: None, ClientIP. ClientIP enables sticky sessions based on the client's IP address. This is generally beneficial to Nessie deployments, but some testing may be required in order to make sure that the load is distributed evenly among the pods. Also, this setting affects only internal clients, not external ones. If Ingress is enabled, it is recommended to set sessionAffinity to None. | +| service.trafficDistribution | string | `"PreferClose"` | The traffic distribution field provides another way to influence traffic routing within a Kubernetes Service. While traffic policies focus on strict semantic guarantees, traffic distribution allows you to express preferences such as routing to topologically closer endpoints. Valid values are: PreferClose | +| service.type | string | `"ClusterIP"` | The type of service to create. | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. | +| serviceMonitor.enabled | bool | `true` | Specifies whether a ServiceMonitor for Prometheus operator should be created. | +| serviceMonitor.interval | string | `""` | The scrape interval; leave empty to let Prometheus decide. Must be a valid duration, e.g. 
1d, 1h30m, 5m, 10s. | +| serviceMonitor.labels | object | `{}` | Labels for the created ServiceMonitor so that Prometheus operator can properly pick it up. | +| serviceMonitor.metricRelabelings | list | `[]` | Relabeling rules to apply to metrics. Ref https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config. | +| tolerations | list | `[]` | A list of tolerations to apply to nessie pods. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/. | +| tracing.attributes | object | `{}` | Resource attributes to identify the nessie service among other tracing sources. See https://opentelemetry.io/docs/reference/specification/resource/semantic_conventions/#service. If left empty, traces will be attached to a service named "Nessie"; to change this, provide a service.name attribute here. | +| tracing.enabled | bool | `false` | Specifies whether tracing for the nessie server should be enabled. | +| tracing.endpoint | string | `"http://otlp-collector:4317"` | The collector endpoint URL to connect to (required). The endpoint URL must have either the http:// or the https:// scheme. The collector must talk the OpenTelemetry protocol (OTLP) and the port must be its gRPC port (by default 4317). See https://quarkus.io/guides/opentelemetry for more information. | +| tracing.sample | string | `"1.0d"` | Which requests should be sampled. Valid values are: "all", "none", or a ratio between 0.0 and "1.0d" (inclusive). E.g. "0.5d" means that 50% of the requests will be sampled. | +| versionStoreType | string | `"IN_MEMORY"` | Which type of version store to use: IN_MEMORY, ROCKSDB, DYNAMODB2, MONGODB2, CASSANDRA2, JDBC2, BIGTABLE. Note: the version store type JDBC is deprecated, please use the Nessie Server Admin Tool to migrate to JDBC2. Note: the version store type CASSANDRA is deprecated, please use the Nessie Server Admin Tool to migrate to CASSANDRA2. 
Note: the version store type DYNAMODB is deprecated, please use the Nessie Server Admin Tool to migrate to DYNAMODB2. Note: the version store type MONGODB is deprecated, please use the Nessie Server Admin Tool to migrate to MONGODB2. | diff --git a/addons/nessie/0.103/chart/nessie/simple-demo-values.yaml b/addons/nessie/0.103/chart/nessie/simple-demo-values.yaml new file mode 100644 index 00000000..497b66f3 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/simple-demo-values.yaml @@ -0,0 +1,44 @@ +service: + # -- The type of service to create.ClusterIP , LoadBalancer + type: LoadBalancer +jdbc: + # -- The JDBC connection string. If you are using Nessie OSS images, then only + # PostgreSQL, MariaDB and MySQL URLs are supported. Check your JDBC driver documentation + # for the correct URL format. + jdbcUrl: jdbc:postgresql://10.43.205.80:5432/postgres + # -- The secret key storing the datasource username. + username: administrator + # -- The secret key storing the datasource password. + password: gvB7Ha9uP7 + +catalog: + # -- Whether to enable the REST catalog service. + enabled: true + iceberg: + # -- The default warehouse name. Required. This is just a symbolic name; it must refer to a + # declared warehouse below. + defaultWarehouse: warehouse + warehouses: + # -- Symbolic name of the warehouse. Required. + - name: warehouse + # -- Location of the warehouse. Required. Used to determine the base location of a table. + # Scheme must be either s3 (Amazon S3), gs (Google GCS) or abfs / abfss (Azure ADLS). Storage + # properties for each location can be defined below. + location: s3://bucket1/ + + storage: + s3: + # Global S3 settings. Can be overridden on a per-bucket basis below. + defaultOptions: + # -- DNS name of the region, required for AWS. + region: us-west-1 + # -- Endpoint URI, required for private clouds. Optional; if not provided, the default is + # used. + endpoint: "http://10.43.223.144:9000" + # -- The secret key storing the AWS secret key id. 
+ awsAccessKeyId: admin + # -- The secret key storing the AWS secret access key. + awsSecretAccessKey: u929mrqXtb + +extraEnv: + [] \ No newline at end of file diff --git a/addons/nessie/0.103/chart/nessie/templates/NOTES.txt b/addons/nessie/0.103/chart/nessie/templates/NOTES.txt new file mode 100644 index 00000000..b06ddd7c --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/NOTES.txt @@ -0,0 +1,38 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +To connect to Nessie, please execute the following commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nessie.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nessie.fullname" . 
}}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nessie.fullname" . }} -o jsonpath="{ .status.loadBalancer.ingress[0].ip }") + echo http://$SERVICE_IP:{{ get (first .Values.service.ports) "number" }} +{{- else if contains "ClusterIP" .Values.service.type }} + nohup kubectl --namespace {{ .Release.Namespace }} port-forward svc/{{ include "nessie.fullname" . }} 19120:{{ get (first .Values.service.ports) "number" }} & + echo "Visit http://127.0.0.1:19120 to use your application" +{{- end }} diff --git a/addons/nessie/0.103/chart/nessie/templates/_helpers.tpl b/addons/nessie/0.103/chart/nessie/templates/_helpers.tpl new file mode 100644 index 00000000..39b474b4 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/_helpers.tpl @@ -0,0 +1,519 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "nessie.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "nessie.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nessie.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "nessie.labels" -}} +helm.sh/chart: {{ include "nessie.chart" . }} +{{ include "nessie.selectorLabels" . }} +app.kubernetes.io/version: {{ .Chart.Version | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "nessie.selectorLabels" -}} +app.kubernetes.io/name: {{ include "nessie.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "nessie.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "nessie.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Convert a dict into a string formed by a comma-separated list of key-value pairs: key1=value1,key2=value2, ... +*/}} +{{- define "nessie.dictToString" -}} +{{- $list := list -}} +{{- range $k, $v := . -}} +{{- $list = append $list (printf "%s=%s" $k $v) -}} +{{- end -}} +{{ join "," $list }} +{{- end -}} + +{{- define "nessie.mergeAdvancedConfig" -}} +{{- $advConfig := index . 0 -}} +{{- $prefix := index . 1 -}} +{{- $dest := index . 2 -}} +{{- range $key, $val := $advConfig -}} +{{- $name := ternary $key (print $prefix "." 
$key) (eq $prefix "") -}} +{{- if kindOf $val | eq "map" -}} +{{- list $val $name $dest | include "nessie.mergeAdvancedConfig" -}} +{{- else -}} +{{- $_ := set $dest $name $val -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Determine the datasource kind based on the jdbcUrl. This relies on the fact that datasource +names should coincide with jdbc schemes in connection URIs. +*/}} +{{- define "nessie.dbKind" -}} +{{- $v := . | split ":" -}} +{{ $v._1 }} +{{- end }} + +{{/* +Apply Nessie Catalog (Iceberg REST) options. +*/}} +{{- define "nessie.applyCatalogIcebergOptions" -}} +{{- $root := index . 0 -}}{{/* the object to introspect */}} +{{- $map := index . 1 -}}{{/* the destination map */}} +{{- with $root -}} +{{- $_ := set $map "nessie.catalog.default-warehouse" .defaultWarehouse -}} +{{- $_ = set $map "nessie.catalog.object-stores.health-check.enabled" .objectStoresHealthCheckEnabled -}} +{{- range $k, $v := .configDefaults -}} +{{- $_ = set $map ( printf "nessie.catalog.iceberg-config-defaults.%s" $k ) $v -}} +{{- end -}} +{{- range $k, $v := .configOverrides -}} +{{- $_ = set $map ( printf "nessie.catalog.iceberg-config-overrides.%s" $k ) $v -}} +{{- end -}} +{{- range $i, $warehouse := .warehouses -}} +{{- if not $warehouse.name -}}{{- required ( printf "catalog.iceberg.warehouses[%d]: missing warehouse name" $i ) $warehouse.name -}}{{- end -}} +{{- $_ = set $map ( printf "nessie.catalog.warehouses.%s.location" ( quote $warehouse.name ) ) $warehouse.location -}} +{{- range $k, $v := $warehouse.configDefaults -}} +{{- $_ = set $map ( printf "nessie.catalog.warehouses.%s.iceberg-config-defaults.%s" ( quote $warehouse.name ) $k ) $v -}} +{{- end -}} +{{- range $k, $v := $warehouse.configOverrides -}} +{{- $_ = set $map ( printf "nessie.catalog.warehouses.%s.iceberg-config-overrides.%s" ( quote $warehouse.name ) $k ) $v -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Apply S3 catalog options. 
+*/}} +{{- define "nessie.applyCatalogStorageS3RootOptions" -}} +{{- $root := index . 0 -}}{{/* the object to introspect */}} +{{- $prefix := index . 1 -}}{{/* the current prefix */}} +{{- $map := index . 2 -}}{{/* the destination map */}} +{{- with $root -}} +{{- if .transport -}} +{{- include "nessie.addConfigOption" (list .transport.maxHttpConnections $map ( print $prefix "http.max-http-connections" )) -}} +{{- include "nessie.addConfigOption" (list .transport.readTimeout $map ( print $prefix "http.read-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.connectTimeout $map ( print $prefix "http.connect-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.connectionAcquisitionTimeout $map ( print $prefix "http.connection-acquisition-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.connectionMaxIdleTime $map ( print $prefix "http.connection-max-idle-time" )) -}} +{{- include "nessie.addConfigOption" (list .transport.connectionTimeToLive $map ( print $prefix "http.connection-time-to-live" )) -}} +{{- include "nessie.addConfigOption" (list .transport.expectContinueEnabled $map ( print $prefix "http.expect-continue-enabled" )) -}} +{{- end -}} +{{- if .sessionCredentials }} +{{- include "nessie.addConfigOption" (list .sessionCredentials.sessionCredentialRefreshGracePeriod $map ( print $prefix "sts.session-grace-period" )) -}} +{{- include "nessie.addConfigOption" (list .sessionCredentials.sessionCredentialCacheMaxEntries $map ( print $prefix "sts.session-cache-max-size" )) -}} +{{- include "nessie.addConfigOption" (list .sessionCredentials.stsClientsCacheMaxEntries $map ( print $prefix "sts.clients-cache-max-size" )) -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "nessie.applyCatalogStorageS3BucketOptions" -}} +{{- $root := index . 0 -}}{{/* the object to introspect */}} +{{- $prefix := index . 1 -}}{{/* the current prefix */}} +{{- $map := index . 
2 -}}{{/* the destination map */}} +{{- with $root -}} +{{- include "nessie.addConfigOption" (list .name $map ( print $prefix "name" )) -}} +{{- include "nessie.addConfigOption" (list .authority $map ( print $prefix "authority" )) -}} +{{- include "nessie.addConfigOption" (list .pathPrefix $map ( print $prefix "path-prefix" )) -}} +{{- include "nessie.addConfigOption" (list .region $map ( print $prefix "region" )) -}} +{{- include "nessie.addConfigOption" (list .endpoint $map ( print $prefix "endpoint" )) -}} +{{- include "nessie.addConfigOption" (list .externalEndpoint $map ( print $prefix "external-endpoint" )) -}} +{{- include "nessie.addConfigOption" (list .pathStyleAccess $map ( print $prefix "path-style-access" )) -}} +{{- include "nessie.addConfigOption" (list .accessPoint $map ( print $prefix "access-point" )) -}} +{{- include "nessie.addConfigOption" (list .allowCrossRegionAccessPoint $map ( print $prefix "allow-cross-region-access-point" )) -}} +{{- include "nessie.addConfigOption" (list .requestSigningEnabled $map ( print $prefix "request-signing-enabled" )) -}} +{{- include "nessie.addConfigOption" (list .authType $map ( print $prefix "auth-type" )) -}} +{{- include "nessie.addConfigOption" (list .stsEndpoint $map ( print $prefix "sts-endpoint" )) -}} +{{- if .clientIam -}} +{{- include "nessie.addConfigOption" (list .clientIam.enabled $map ( print $prefix "client-iam.enabled" )) -}} +{{- include "nessie.addConfigOption" (list .clientIam.policy $map ( print $prefix "client-iam.policy" )) -}} +{{- include "nessie.addConfigOption" (list .clientIam.roleArn $map ( print $prefix "client-iam.assume-role" )) -}} +{{- include "nessie.addConfigOption" (list .clientIam.roleSessionName $map ( print $prefix "client-iam.role-session-name" )) -}} +{{- include "nessie.addConfigOption" (list .clientIam.externalId $map ( print $prefix "client-iam.external-id" )) -}} +{{- include "nessie.addConfigOption" (list .clientIam.sessionDuration $map ( print $prefix 
"client-iam.session-duration" )) -}} +{{- if .clientIam.statements -}} +{{- range $i, $statement := .clientIam.statements -}} +{{- $_ := set $map ( printf "%sclient-iam.statements[%d]" $prefix $i ) $statement -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- if .serverIam -}} +{{- include "nessie.addConfigOption" (list .serverIam.enabled $map ( print $prefix "server-iam.enabled" )) -}} +{{- include "nessie.addConfigOption" (list .serverIam.policy $map ( print $prefix "server-iam.policy" )) -}} +{{- include "nessie.addConfigOption" (list .serverIam.roleArn $map ( print $prefix "server-iam.ssume-role" )) -}} +{{- include "nessie.addConfigOption" (list .serverIam.roleSessionName $map ( print $prefix "server-iam.role-session-name" )) -}} +{{- include "nessie.addConfigOption" (list .serverIam.externalId $map ( print $prefix "server-iam.external-id" )) -}} +{{- include "nessie.addConfigOption" (list .serverIam.sessionDuration $map ( print $prefix "server-iam.session-duration" )) -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Apply GCS catalog options. +*/}} +{{- define "nessie.applyCatalogStorageGcsRootOptions" -}} +{{- $root := index . 0 -}}{{/* the object to introspect */}} +{{- $prefix := index . 1 -}}{{/* the current prefix */}} +{{- $map := index . 
2 -}}{{/* the destination map */}} +{{- with $root -}} +{{- if .transport -}} +{{- include "nessie.addConfigOption" (list .transport.maxAttempts $map ( print $prefix "max-attempts" )) -}} +{{- include "nessie.addConfigOption" (list .transport.connectTimeout $map ( print $prefix "connect-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.readTimeout $map ( print $prefix "read-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.initialRetryDelay $map ( print $prefix "initial-retry-delay" )) -}} +{{- include "nessie.addConfigOption" (list .transport.maxRetryDelay $map ( print $prefix "max-retry-delay" )) -}} +{{- include "nessie.addConfigOption" (list .transport.retryDelayMultiplier $map ( print $prefix "retry-delay-multiplier" )) -}} +{{- include "nessie.addConfigOption" (list .transport.initialRpcTimeout $map ( print $prefix "initial-rpc-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.maxRpcTimeout $map ( print $prefix "max-rpc-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.rpcTimeoutMultiplier $map ( print $prefix "rpc-timeout-multiplier" )) -}} +{{- include "nessie.addConfigOption" (list .transport.logicalTimeout $map ( print $prefix "logical-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.totalTimeout $map ( print $prefix "total-timeout" )) -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "nessie.applyCatalogStorageGcsBucketOptions" -}} +{{- $root := index . 0 -}}{{/* the object to introspect */}} +{{- $prefix := index . 1 -}}{{/* the current prefix */}} +{{- $map := index . 
2 -}}{{/* the destination map */}} +{{- with $root -}} +{{- include "nessie.addConfigOption" (list .name $map ( print $prefix "name" )) -}} +{{- include "nessie.addConfigOption" (list .authority $map ( print $prefix "authority" )) -}} +{{- include "nessie.addConfigOption" (list .pathPrefix $map ( print $prefix "path-prefix" )) -}} +{{- include "nessie.addConfigOption" (list .host $map ( print $prefix "host" )) -}} +{{- include "nessie.addConfigOption" (list .externalHost $map ( print $prefix "external-host" )) -}} +{{- include "nessie.addConfigOption" (list .userProject $map ( print $prefix "user-project" )) -}} +{{- include "nessie.addConfigOption" (list .projectId $map ( print $prefix "project-id" )) -}} +{{- include "nessie.addConfigOption" (list .quotaProjectId $map ( print $prefix "quota-project-id" )) -}} +{{- include "nessie.addConfigOption" (list .clientLibToken $map ( print $prefix "client-lib-token" )) -}} +{{- include "nessie.addConfigOption" (list .authType $map ( print $prefix "auth-type" )) -}} +{{- include "nessie.addConfigOption" (list .encryptionKey $map ( print $prefix "encryption-key" )) -}} +{{- include "nessie.addConfigOption" (list .decryptionKey $map ( print $prefix "decryption-key" )) -}} +{{- include "nessie.addConfigOption" (list .readChunkSize $map ( print $prefix "read-chunk-size" )) -}} +{{- include "nessie.addConfigOption" (list .writeChunkSize $map ( print $prefix "write-chunk-size" )) -}} +{{- include "nessie.addConfigOption" (list .deleteBatchSize $map ( print $prefix "delete-batch-size" )) -}} +{{- end -}} +{{- end -}} + +{{/* +Apply ADLS catalog options. +*/}} +{{- define "nessie.applyCatalogStorageAdlsRootOptions" -}} +{{- $root := index . 0 -}}{{/* the object to introspect */}} +{{- $prefix := index . 1 -}}{{/* the current prefix */}} +{{- $map := index . 
2 -}}{{/* the destination map */}} +{{- with $root -}} +{{- if .transport -}} +{{- include "nessie.addConfigOption" (list .transport.maxHttpConnections $map ( print $prefix "max-http-connections" )) -}} +{{- include "nessie.addConfigOption" (list .transport.connectTimeout $map ( print $prefix "connect-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.readTimeout $map ( print $prefix "read-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.writeTimeout $map ( print $prefix "write-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.connectionIdleTimeout $map ( print $prefix "connection-idle-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .transport.readBlockSize $map ( print $prefix "read-block-size" )) -}} +{{- include "nessie.addConfigOption" (list .transport.writeBlockSize $map ( print $prefix "write-block-size" )) -}} +{{- end -}} +{{- list .advancedConfig ( print $prefix "configuration" ) $map | include "nessie.mergeAdvancedConfig" }} +{{- end -}} +{{- end -}} + +{{- define "nessie.applyCatalogStorageAdlsFileSystemOptions" -}} +{{- $root := index . 0 -}}{{/* the object to introspect */}} +{{- $prefix := index . 1 -}}{{/* the current prefix */}} +{{- $map := index . 
2 -}}{{/* the destination map */}} +{{- with $root -}} +{{- include "nessie.addConfigOption" (list .name $map ( print $prefix "name" )) -}} +{{- include "nessie.addConfigOption" (list .authority $map ( print $prefix "authority" )) -}} +{{- include "nessie.addConfigOption" (list .pathPrefix $map ( print $prefix "path-prefix" )) -}} +{{- include "nessie.addConfigOption" (list .endpoint $map ( print $prefix "endpoint" )) -}} +{{- include "nessie.addConfigOption" (list .externalEndpoint $map ( print $prefix "external-endpoint" )) -}} +{{- include "nessie.addConfigOption" (list .retryPolicy $map ( print $prefix "retry-policy" )) -}} +{{- include "nessie.addConfigOption" (list .maxRetries $map ( print $prefix "max-retries" )) -}} +{{- include "nessie.addConfigOption" (list .tryTimeout $map ( print $prefix "try-timeout" )) -}} +{{- include "nessie.addConfigOption" (list .retryDelay $map ( print $prefix "retry-delay" )) -}} +{{- include "nessie.addConfigOption" (list .maxRetryDelay $map ( print $prefix "max-retry-delay" )) -}} +{{- include "nessie.addConfigOption" (list .authType $map ( print $prefix "auth-type" )) -}} +{{- end -}} +{{- end -}} + +{{/* +Define environment variables for catalog storage options. +*/}} +{{- define "nessie.catalogStorageEnv" -}} +{{ $global := .}} +{{- include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.s3.defaultOptions.accessKeySecret "awsAccessKeyId" "s3.default-options.access-key" "name" true . ) }} +{{- include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.s3.defaultOptions.accessKeySecret "awsSecretAccessKey" "s3.default-options.access-key" "secret" false . ) }} +{{- range $i, $bucket := .Values.catalog.storage.s3.buckets -}} +{{- with $global }} +{{- include "nessie.catalogSecretToEnv" (list $bucket.accessKeySecret "awsAccessKeyId" (printf "s3.buckets.bucket%d.access-key" (add $i 1)) "name" true . 
) }} +{{- include "nessie.catalogSecretToEnv" (list $bucket.accessKeySecret "awsSecretAccessKey" (printf "s3.buckets.bucket%d.access-key" (add $i 1)) "secret" false . ) }} +{{- end -}} +{{- end -}} +{{- include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.gcs.defaultOptions.authCredentialsJsonSecret "key" "gcs.default-options.auth-credentials-json" "key" true . ) }} +{{- include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.gcs.defaultOptions.oauth2TokenSecret "token" "gcs.default-options.oauth-token" "token" true . ) }} +{{- include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.gcs.defaultOptions.oauth2TokenSecret "expiresAt" "gcs.default-options.oauth-token" "expiresAt" false . ) }} +{{- range $i, $bucket := .Values.catalog.storage.gcs.buckets -}} +{{- with $global }} +{{- include "nessie.catalogSecretToEnv" (list $bucket.authCredentialsJsonSecret "key" (printf "gcs.buckets.bucket%d.auth-credentials-json" (add $i 1)) "key" true . ) }} +{{- include "nessie.catalogSecretToEnv" (list $bucket.oauth2TokenSecret "token" (printf "gcs.buckets.bucket%d.oauth-token" (add $i 1)) "token" true . ) }} +{{- include "nessie.catalogSecretToEnv" (list $bucket.oauth2TokenSecret "expiresAt" (printf "gcs.buckets.bucket%d.oauth-token" (add $i 1)) "expiresAt" false . ) }} +{{- end -}} +{{- end -}} +{{ include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.adls.defaultOptions.accountSecret "accountName" "adls.default-options.account" "name" true . ) }} +{{- include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.adls.defaultOptions.accountSecret "accountKey" "adls.default-options.account" "secret" false . ) }} +{{- include "nessie.catalogSecretToEnv" (list .Values.catalog.storage.adls.defaultOptions.sasTokenSecret "sasToken" "adls.default-options.sas-token" "key" true . 
) }} +{{- range $i, $filesystem := .Values.catalog.storage.adls.filesystems -}} +{{- with $global }} +{{- include "nessie.catalogSecretToEnv" (list $filesystem.accountSecret "accountName" (printf "adls.file-systems.filesystem%d.account" (add $i 1)) "name" true . ) }} +{{- include "nessie.catalogSecretToEnv" (list $filesystem.accountSecret "accountKey" (printf "adls.file-systems.filesystem%d.account" (add $i 1)) "secret" false . ) }} +{{- include "nessie.catalogSecretToEnv" (list $filesystem.sasTokenSecret "sasToken" (printf "adls.file-systems.filesystem%d.sas-token" (add $i 1)) "key" true . ) }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Define an env var from secret key. + +Secrets are (can be) composite values - think of a username+password. +Secrets are not (no longer) present (or directly resolvable) from the bucket option types, but have to be resolved +via a symbolic name, which is something like 'nessie-catalog-secrets.s3.default-options.access-key'. The bucket +config types know about that symbolic name and resolve it via a SecretsProvider, which resolves via Quarkus' config. + +*/}} +{{- define "nessie.catalogSecretToEnv" -}} +{{- $secret := index . 0 -}} +{{- $key := index . 1 -}} +{{- $midfix := index . 2 -}} +{{- $suffix := index . 3 -}} +{{- $addRef := index . 4 -}} +{{- $global := index . 5 -}} +{{- if $secret -}} +{{- $secretName := printf "%s-%s" (include "nessie.fullname" $global) (get $secret "name") -}} +{{- $secretKey := get $secret $key -}} +{{- with $global -}} +{{- if (and $secretName $secretKey) -}} +{{ if $addRef -}} +# +# {{ $midfix }} +# +- name: {{ (printf "nessie.catalog.service.%s" $midfix) | quote }} + value: {{ (printf "urn:nessie-secret:quarkus:nessie-catalog-secrets.%s" $midfix) | quote }} +{{- end }} +- name: {{ (printf "nessie-catalog-secrets.%s.%s" $midfix $suffix) | quote }} + valueFrom: + secretKeyRef: + name: {{ (tpl $secretName . ) | quote }} + key: {{ (tpl $secretKey . 
) | quote }} +{{ end -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Define an env var from secret key. +*/}} +{{- define "nessie.secretToEnv" -}} +{{- $secret := index . 0 -}} +{{- $key := index . 1 -}} +{{- $envVarName := index . 2 -}} +{{- $global := index . 3 -}} +{{- if $secret -}} +{{- $secretName := printf "%s-%s" (include "nessie.fullname" $global) (get $secret "name") -}} +{{- $secretKey := get $secret $key -}} +{{- with $global -}} +{{- if (and $secretName $secretKey) -}} +- name: {{ $envVarName | quote }} + valueFrom: + secretKeyRef: + name: {{ (tpl $secretName . ) | quote }} + key: {{ (tpl $secretKey . ) | quote }} +{{ end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Adds a configuration option to the map if the value is not nil. Zero-values like false or 0 are +considered valid and thus added. This template should not be applied to non-scalar values like +slices or maps. +*/}} +{{- define "nessie.addConfigOption" -}} +{{- $value := index . 0 -}}{{/* the value to add */}} +{{- $map := index . 1 -}}{{/* the destination map */}} +{{- $key := index . 2 -}}{{/* the destination map key */}} +{{- if (ne $value nil) -}} +{{- $_ := set $map $key $value -}} +{{- end -}} +{{- end -}} + +{{/* +Prints the configuration option to the destination configmap entry. See configmap.yaml. +Any nil values will be printed as empty config options; otherwise, the value will be evaluated +as a template against the global context, then printed. Furthermore, if the value contains +line breaks, they will be escaped and a multi-line option will be printed. +*/}} +{{- define "nessie.appendConfigOption" -}} +{{- $key := index . 0 -}} +{{- $value := index . 1 -}} +{{- $global := index . 
2 -}} +{{- $valAsString := "" -}} +{{- if ne $value nil -}} +{{- $valAsString = tpl (toString $value) $global -}} +{{- if contains "\r\n" $valAsString -}} +{{- $valAsString = $valAsString | nindent 4 | replace "\r\n" "\\\r\n" -}} +{{- else if contains "\n" $valAsString -}} +{{- $valAsString = $valAsString | nindent 4 | replace "\n" "\\\n" -}} +{{- end -}} +{{- end -}} +{{ print $key "=" $valAsString }} +{{- end -}} + +{{/* +Prints the ports section of the container spec. Also validates all port names and numbers to ensure +that they are consistent and that there are no overlaps. +*/}} +{{- define "nessie.containerPorts" -}} +{{- $ports := dict -}} +{{- range $i, $port := .Values.service.ports -}} +{{- if hasKey $ports $port.name -}} +{{- fail (printf "service.ports[%d]: port name already taken: %v" $i $port.name) -}} +{{- end -}} +{{- if has $port.number (values $ports) -}} +{{- fail (printf "service.ports[%d]: port number already taken: %v" $i $port.number) -}} +{{- end -}} +{{- $_ := set $ports $port.name $port.number -}} +{{- end -}} +{{- if hasKey $ports .Values.managementService.portName -}} +{{- fail (print "managementService.portName: port name already taken: " .Values.managementService.portName ) -}} +{{- end -}} +{{- if has .Values.managementService.portNumber (values $ports) -}} +{{- fail (print "managementService.portNumber: port number already taken: " .Values.managementService.portNumber) -}} +{{- end -}} +{{- $_ := set $ports .Values.managementService.portName .Values.managementService.portNumber -}} +{{- range $i, $svc := .Values.extraServices -}} +{{- range $j, $port := $svc.ports -}} +{{- if hasKey $ports $port.name -}} +{{- if ne $port.number (get $ports $port.name) -}} +{{- fail (printf "extraServices[%d].ports[%d]: wrong port number for port %s, expected %v, got %v" $i $j $port.name (get $ports $port.name) $port.number) -}} +{{- end -}} +{{- else if has $port.number (values $ports) -}} +{{- fail (printf "extraServices[%d].ports[%d]: port number 
already taken: %v" $i $j $port.number) -}} +{{- end -}} +{{- $_ := set $ports $port.name $port.number -}} +{{- end -}} +{{- end -}} +ports: +{{ range $portName, $portNumber := $ports -}} +- name: {{ $portName }} + containerPort: {{ $portNumber }} + protocol: TCP +{{ end -}} +{{ end -}} + +{{/* +Shared - Converts a Kubernetes quantity to a number (int64 if possible or float64 otherwise). +It handles raw numbers as well as quantities with suffixes +like m, k, M, G, T, P, E, ki, Mi, Gi, Ti, Pi, Ei. +It also handles scientific notation. +Quantities should be positive, so negative values, zero, or any unparseable number +will result in a failure. +https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/ +*/}} +{{- define "nessie.quantity" -}} +{{- $quantity := . -}} +{{- $n := $quantity | float64 -}} +{{- if kindIs "string" $quantity -}} +{{- if hasSuffix "m" $quantity -}} +{{- $n = divf (trimSuffix "m" $quantity | float64) 1000.0 -}} +{{- else if hasSuffix "k" $quantity -}} +{{- $n = trimSuffix "k" $quantity | int64 | mul 1000 -}} +{{- else if hasSuffix "M" $quantity -}} +{{- $n = trimSuffix "M" $quantity | int64 | mul 1000000 -}} +{{- else if hasSuffix "G" $quantity -}} +{{- $n = trimSuffix "G" $quantity | int64 | mul 1000000000 -}} +{{- else if hasSuffix "T" $quantity -}} +{{- $n = trimSuffix "T" $quantity | int64 | mul 1000000000000 -}} +{{- else if hasSuffix "P" $quantity -}} +{{- $n = trimSuffix "P" $quantity | int64 | mul 1000000000000000 -}} +{{- else if hasSuffix "E" $quantity -}} +{{- $n = trimSuffix "E" $quantity | int64 | mul 1000000000000000000 -}} +{{- else if hasSuffix "ki" $quantity -}} +{{- $n = trimSuffix "ki" $quantity | int64 | mul 1024 -}} +{{- else if hasSuffix "Mi" $quantity -}} +{{- $n = trimSuffix "Mi" $quantity | int64 | mul 1048576 -}} +{{- else if hasSuffix "Gi" $quantity -}} +{{- $n = trimSuffix "Gi" $quantity | int64 | mul 1073741824 -}} +{{- else if hasSuffix "Ti" $quantity -}} +{{- $n = trimSuffix "Ti" $quantity | 
int64 | mul 1099511627776 -}} +{{- else if hasSuffix "Pi" $quantity -}} +{{- $n = trimSuffix "Pi" $quantity | int64 | mul 1125899906842624 -}} +{{- else if hasSuffix "Ei" $quantity -}} +{{- $n = trimSuffix "Ei" $quantity | int64 | mul 1152921504606846976 -}} +{{- end -}} +{{- end -}} +{{- if le ($n | float64) 0.0 -}} +{{- fail (print "invalid quantity: " $quantity) -}} +{{- end -}} +{{- if kindIs "float64" $n -}} +{{- printf "%f" $n -}} +{{- else -}} +{{- printf "%v" $n -}} +{{- end -}} +{{- end -}} diff --git a/addons/nessie/0.103/chart/nessie/templates/configmap.yaml b/addons/nessie/0.103/chart/nessie/templates/configmap.yaml new file mode 100644 index 00000000..ab804732 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/configmap.yaml @@ -0,0 +1,225 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "nessie.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . | nindent 4 }} + {{- if .Values.configMapLabels }} + {{- toYaml .Values.configMapLabels | nindent 4 }} + {{- end }} +data: + application.properties: |- + {{- $map := dict -}} + {{- $_ := set $map "nessie.version.store.type" .Values.versionStoreType -}} + + {{- $serviceNamesMgmt := include "nessie.fullname" . 
| printf "%s-mgmt" -}} + {{- $cacheInvalToken := .Values.managementService | toYaml | sha1sum -}} + {{- $_ = set $map "nessie.version.store.persist.cache-invalidations.service-names" $serviceNamesMgmt -}} + {{- $_ = set $map "nessie.version.store.persist.cache-invalidations.valid-tokens" $cacheInvalToken -}} + + {{- $_ = set $map "quarkus.http.port" (get (first .Values.service.ports) "number") -}} + {{- $_ = set $map "quarkus.management.port" .Values.managementService.portNumber -}} + + {{- $_ = set $map "quarkus.log.level" (coalesce .Values.logLevel .Values.log.level) -}} + {{- if .Values.log.console.enabled -}} + {{- $_ = set $map "quarkus.log.console.enable" "true" -}} + {{- $_ = set $map "quarkus.log.console.level" .Values.log.console.threshold -}} + {{- if .Values.log.console.json -}} + {{- $_ = set $map "quarkus.log.console.json" "true" -}} + {{- else -}} + {{- $_ = set $map "quarkus.log.console.format" .Values.log.console.format -}} + {{- end -}} + {{- else -}} + {{- $_ = set $map "quarkus.log.console.enable" "false" -}} + {{- end -}} + {{- if .Values.log.file.enabled -}} + {{- $_ = set $map "quarkus.log.file.enable" "true" -}} + {{- $_ = set $map "quarkus.log.file.level" .Values.log.file.threshold -}} + {{- $_ = set $map "quarkus.log.file.path" (printf "%s/%s" .Values.log.file.logsDir .Values.log.file.fileName) -}} + {{- $_ = set $map "quarkus.log.file.rotation.max-file-size" (include "nessie.quantity" .Values.log.file.rotation.maxFileSize) -}} + {{- $_ = set $map "quarkus.log.file.rotation.max-backup-index" .Values.log.file.rotation.maxBackupIndex -}} + {{- if .Values.log.file.rotation.fileSuffix -}} + {{- $_ = set $map "quarkus.log.file.rotation.file-suffix" .Values.log.file.rotation.fileSuffix -}} + {{- end -}} + {{- if .Values.log.file.json -}} + {{- $_ = set $map "quarkus.log.file.json" "true" -}} + {{- else -}} + {{- $_ = set $map "quarkus.log.file.format" .Values.log.file.format -}} + {{- end -}} + {{- else -}} + {{- $_ = set $map 
"quarkus.log.file.enable" "false" -}} + {{- end -}} + {{- if .Values.log.sentry.enabled -}} + {{- $_ = set $map "quarkus.log.sentry" "true" -}} + {{- if not .Values.log.sentry.dsn -}}{{- required "log.sentry.dsn is required when log.sentry.enabled is true" .Values.log.sentry.dsn -}}{{- end -}} + {{- $_ = set $map "quarkus.log.sentry.dsn" .Values.log.sentry.dsn -}} + {{- $_ = set $map "quarkus.log.sentry.level" .Values.log.sentry.level -}} + {{- if .Values.log.sentry.environment -}} + {{- $_ = set $map "quarkus.log.sentry.environment" .Values.log.sentry.environment -}} + {{- end -}} + {{- if .Values.log.sentry.release -}} + {{- $_ = set $map "quarkus.log.sentry.release" .Values.log.sentry.release -}} + {{- end -}} + {{- $_ = set $map "quarkus.log.sentry.in-app-packages" ( join "," .Values.log.sentry.inAppPackages ) -}} + {{- else -}} + {{- $_ = set $map "quarkus.log.sentry" "false" -}} + {{- end -}} + {{- $categories := dict -}} + {{- list .Values.log.categories "" $categories | include "nessie.mergeAdvancedConfig" -}} + {{- range $k, $v := $categories -}} + {{- $_ = set $map (printf "quarkus.log.category.\"%s\".level" $k) $v -}} + {{- end -}} + + {{- if hasPrefix "DYNAMODB" .Values.versionStoreType -}} + {{- if .Values.dynamodb.region -}} + {{- $_ = set $map "quarkus.dynamodb.aws.region" .Values.dynamodb.region -}} + {{- end -}} + {{- if .Values.dynamodb.profile -}} + {{- $_ = set $map "quarkus.dynamodb.aws.credentials.profile-provider.profile-name" .Values.dynamodb.profile -}} + {{- end -}} + {{- end -}} + + {{- if hasPrefix "CASSANDRA" .Values.versionStoreType -}} + {{- $_ = set $map "quarkus.cassandra.keyspace" .Values.cassandra.keyspace -}} + {{- $_ = set $map "quarkus.cassandra.contact-points" .Values.cassandra.contactPoints -}} + {{- if .Values.cassandra.localDatacenter -}} + {{- $_ = set $map "quarkus.cassandra.local-datacenter" .Values.cassandra.localDatacenter -}} + {{- end -}} + {{- /* legacy support for username/password */}} + {{- if and 
.Values.cassandra.auth .Values.cassandra.auth.username -}} + {{- $_ = set $map "quarkus.cassandra.auth.username" .Values.cassandra.auth.username -}} + {{- $_ = set $map "quarkus.cassandra.auth.password" .Values.cassandra.auth.password -}} + {{- end -}} + {{- end -}} + + {{- if hasPrefix "ROCKSDB" .Values.versionStoreType -}} + {{- $_ = set $map "nessie.version.store.persist.rocks.database-path" "/rocks-nessie" -}} + {{- end -}} + + {{- if hasPrefix "MONGODB" .Values.versionStoreType -}} + {{- if .Values.mongodb.name -}} + {{- $_ = set $map "quarkus.mongodb.database" .Values.mongodb.name -}} + {{- end -}} + {{- if .Values.mongodb.connectionString -}} + {{- $_ = set $map "quarkus.mongodb.connection-string" .Values.mongodb.connectionString -}} + {{- end -}} + {{- end -}} + + {{- if hasPrefix "JDBC" .Values.versionStoreType -}} + {{- $oldConfig := .Values.postgres | default dict -}} + {{- $newConfig := .Values.jdbc | default dict -}} + {{- $jdbcUrl := coalesce $oldConfig.jdbcUrl $newConfig.jdbcUrl -}} + {{- $dbKind := include "nessie.dbKind" $jdbcUrl -}} + {{- $_ = set $map "nessie.version.store.persist.jdbc.datasource" $dbKind -}} + {{- $_ = set $map ( printf "quarkus.datasource.%s.jdbc.url" $dbKind ) $jdbcUrl }} + {{- end -}} + + {{- if hasPrefix "BIGTABLE" .Values.versionStoreType -}} + {{- $_ = set $map "quarkus.google.cloud.project-id" .Values.bigtable.projectId -}} + {{- $_ = set $map "nessie.version.store.persist.bigtable.instance-id" .Values.bigtable.instanceId -}} + {{- $_ = set $map "nessie.version.store.persist.bigtable.app-profile-id" .Values.bigtable.appProfileId -}} + {{- end -}} + + {{- if .Values.authentication.enabled -}} + {{- $_ = set $map "nessie.server.authentication.enabled" .Values.authentication.enabled -}} + {{- $_ = set $map "quarkus.oidc.ui-app.application-type" "web-app" -}} + {{- $_ = set $map "quarkus.oidc.ui-app.tenant-paths" "/,/tree*,/content*,/notfound*,/commits*" -}} + {{- if .Values.authentication.oidcAuthServerUrl -}} + {{- $_ = set 
$map "quarkus.oidc.auth-server-url" .Values.authentication.oidcAuthServerUrl -}} + {{- $_ = set $map "quarkus.oidc.ui-app.auth-server-url" .Values.authentication.oidcAuthServerUrl -}} + {{- end -}} + {{- if .Values.authentication.oidcClientId -}} + {{- $_ = set $map "quarkus.oidc.client-id" .Values.authentication.oidcClientId -}} + {{- $_ = set $map "quarkus.oidc.ui-app.client-id" .Values.authentication.oidcClientId -}} + {{- end -}} + {{- else -}} + {{- $_ = set $map "quarkus.oidc.tenant-enabled" false -}} + {{- end -}} + + {{- if .Values.authorization.enabled -}} + {{- $_ = set $map "nessie.server.authorization.enabled" .Values.authorization.enabled -}} + {{- if .Values.authorization.rules -}} + {{- range $ruleId, $rule := .Values.authorization.rules -}} + {{- $_ = set $map (printf "nessie.server.authorization.rules.%s" $ruleId ) $rule -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- if .Values.tracing.enabled -}} + {{- $_ = set $map "quarkus.otel.exporter.otlp.endpoint" .Values.tracing.endpoint -}} + {{- if .Values.tracing.attributes -}} + {{- $_ = set $map "quarkus.otel.resource.attributes" (include "nessie.dictToString" .Values.tracing.attributes) -}} + {{- end -}} + {{- if .Values.tracing.sample -}} + {{ if eq .Values.tracing.sample "all" -}} + {{- $_ = set $map "quarkus.otel.traces.sampler" "parentbased_always_on" -}} + {{- else if eq .Values.tracing.sample "none" -}} + {{- $_ = set $map "quarkus.otel.traces.sampler" "always_off" -}} + {{- else -}} + {{- $_ = set $map "quarkus.otel.traces.sampler" "parentbased_traceidratio" -}} + {{- $_ = set $map "quarkus.otel.traces.sampler.arg" .Values.tracing.sample -}} + {{- end -}} + {{- end -}} + {{- else -}} + {{- $_ = set $map "quarkus.otel.sdk.disabled" true -}} + {{- end -}} + + {{- if .Values.metrics.enabled -}} + {{- range $name, $value := .Values.metrics.tags -}} + {{- $_ = set $map (print "nessie.metrics.tags." 
$name) $value -}} + {{- end -}} + {{- else -}} + {{- $_ = set $map "quarkus.micrometer.enabled" "false" -}} + {{- end -}} + + {{- if .Values.catalog.enabled -}} + {{- list .Values.catalog.iceberg $map | include "nessie.applyCatalogIcebergOptions" -}} + {{- if .Values.catalog.storage.retryAfter -}} + {{- $_ = set $map "nessie.catalog.error-handling.throttled-retry-after" .Values.catalog.storage.retryAfter -}} + {{- end -}} + {{- list .Values.catalog.storage.s3 "nessie.catalog.service.s3." $map | include "nessie.applyCatalogStorageS3RootOptions" }} + {{- list .Values.catalog.storage.s3.defaultOptions "nessie.catalog.service.s3.default-options." $map | include "nessie.applyCatalogStorageS3BucketOptions" }} + {{- range $i, $bucket := .Values.catalog.storage.s3.buckets -}} + {{- if not $bucket.name -}}{{- required ( printf "catalog.storage.s3.buckets[%d]: missing bucket name" $i ) $bucket.name -}}{{- end -}} + {{- list $bucket ( printf "nessie.catalog.service.s3.buckets.bucket%d." (add $i 1) ) $map | include "nessie.applyCatalogStorageS3BucketOptions" }} + {{- end -}} + {{- list .Values.catalog.storage.gcs "nessie.catalog.service.gcs." $map | include "nessie.applyCatalogStorageGcsRootOptions" }} + {{- list .Values.catalog.storage.gcs.defaultOptions "nessie.catalog.service.gcs.default-options." $map | include "nessie.applyCatalogStorageGcsBucketOptions" }} + {{- range $i, $bucket := .Values.catalog.storage.gcs.buckets -}} + {{- if not $bucket.name -}}{{- required ( printf "catalog.storage.gcs.buckets[%d]: missing bucket name" $i ) $bucket.name -}}{{- end -}} + {{ list $bucket ( printf "nessie.catalog.service.gcs.buckets.bucket%d." (add $i 1) ) $map | include "nessie.applyCatalogStorageGcsBucketOptions" }} + {{- end -}} + {{- list .Values.catalog.storage.adls "nessie.catalog.service.adls." $map | include "nessie.applyCatalogStorageAdlsRootOptions" }} + {{- list .Values.catalog.storage.adls.defaultOptions "nessie.catalog.service.adls.default-options." 
$map | include "nessie.applyCatalogStorageAdlsFileSystemOptions" }} + {{- range $i, $filesystem := .Values.catalog.storage.adls.filesystems -}} + {{- if not $filesystem.name -}}{{- required ( printf "catalog.storage.adls.filesystems[%d]: missing filesystem name" $i ) $filesystem.name -}}{{- end -}} + {{- list $filesystem ( printf "nessie.catalog.service.adls.file-systems.filesystem%d." (add $i 1) ) $map | include "nessie.applyCatalogStorageAdlsFileSystemOptions" }} + {{- end -}} + {{- else -}} + {{- $_ = set $map "nessie.catalog.object-stores.health-check.enabled" false -}} + {{- end -}} + + {{- list .Values.advancedConfig "" $map | include "nessie.mergeAdvancedConfig" }} + {{- $global := . -}} + {{- range $k, $v := $map }} + {{ include "nessie.appendConfigOption" (list $k $v $global) }} + {{- end }} \ No newline at end of file diff --git a/addons/nessie/0.103/chart/nessie/templates/deployment.yaml b/addons/nessie/0.103/chart/nessie/templates/deployment.yaml new file mode 100644 index 00000000..e163af32 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/deployment.yaml @@ -0,0 +1,199 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nessie.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . | nindent 4 }} + {{- if .Values.podLabels }} + {{- tpl (toYaml .Values.podLabels) . 
| nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "nessie.selectorLabels" . | nindent 6 }} + strategy: + {{- tpl (toYaml .Values.deploymentStrategy) . | nindent 4 }} + template: + metadata: + annotations: + projectnessie.org/config-checksum: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + prometheus.io/scrape: "{{ .Values.metrics.enabled | default "false" }}" + prometheus.io/port: "{{ .Values.managementService.portNumber }}" + prometheus.io/path: "/q/metrics" + {{- if .Values.podAnnotations }} + {{- tpl (toYaml .Values.podAnnotations) . | nindent 8 }} + {{- end }} + labels: + {{- include "nessie.selectorLabels" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- tpl (toYaml .Values.podLabels) . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- tpl (toYaml .Values.imagePullSecrets) . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "nessie.serviceAccountName" . }} + securityContext: + {{- tpl (toYaml .Values.podSecurityContext) . | nindent 8 }} + {{- if .Values.extraInitContainers }} + initContainers: + {{- tpl (toYaml .Values.extraInitContainers) . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + ports: + - name: {{ .Values.managementService.portName | default "metrics" }} + containerPort: {{ .Values.managementService.port | default 9000 }} + securityContext: + {{- tpl (toYaml .Values.securityContext) . | nindent 12 }} + image: "{{ tpl .Values.image.repository . }}:{{ tpl .Values.image.tag . | default .Chart.Version }}" + imagePullPolicy: {{ tpl .Values.image.pullPolicy . 
}} + volumeMounts: + - name: nessie-config + mountPath: {{ trimSuffix "/" .Values.image.configDir }}/application.properties + subPath: application.properties + readOnly: true + - name: temp-dir + mountPath: /tmp + {{- if hasPrefix "ROCKSDB" .Values.versionStoreType }} + - name: rocks-storage + mountPath: /rocks-nessie + readOnly: false + {{- end }} + {{- if .Values.log.file.enabled }} + - name: logs-storage + mountPath: {{ .Values.log.file.logsDir }} + readOnly: false + {{- end }} + {{- if and (hasPrefix "BIGTABLE" .Values.versionStoreType) (.Values.bigtable.secret) }} + - name: bigtable-creds + mountPath: /bigtable-nessie + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- tpl (toYaml .Values.extraVolumeMounts) . | nindent 12 }} + {{- end }} + env: + {{- if hasPrefix "DYNAMODB" .Values.versionStoreType -}} + {{- include "nessie.secretToEnv" (list .Values.dynamodb.secret "awsAccessKeyId" "AWS_ACCESS_KEY_ID" . ) | trim | nindent 12 -}} + {{- include "nessie.secretToEnv" (list .Values.dynamodb.secret "awsSecretAccessKey" "AWS_SECRET_ACCESS_KEY" . ) | trim | nindent 12 -}} + {{- end -}} + {{- if hasPrefix "MONGODB" .Values.versionStoreType }} + {{- include "nessie.secretToEnv" (list .Values.mongodb.secret "username" "quarkus.mongodb.credentials.username" . ) | trim | nindent 12 -}} + {{- include "nessie.secretToEnv" (list .Values.mongodb.secret "password" "quarkus.mongodb.credentials.password" . ) | trim | nindent 12 -}} + {{- end -}} + {{- if hasPrefix "CASSANDRA" .Values.versionStoreType }} + {{- include "nessie.secretToEnv" (list .Values.cassandra.secret "username" "quarkus.cassandra.auth.username" . ) | trim | nindent 12 -}} + {{- include "nessie.secretToEnv" (list .Values.cassandra.secret "password" "quarkus.cassandra.auth.password" . 
) | trim | nindent 12 -}} + {{- end -}} + {{- if hasPrefix "JDBC" .Values.versionStoreType }} + {{- $oldConfig := .Values.postgres | default dict }} + {{- $newConfig := .Values.jdbc | default dict }} + {{- $jdbcUrl := coalesce $oldConfig.jdbcUrl $newConfig.jdbcUrl }} + {{- $secret := coalesce $oldConfig.secret $newConfig.secret }} + {{- $dbKind := include "nessie.dbKind" $jdbcUrl }} + {{- include "nessie.secretToEnv" (list $secret "username" (printf "quarkus.datasource.%s.username" $dbKind) . ) | trim | nindent 12 }} + {{- include "nessie.secretToEnv" (list $secret "password" (printf "quarkus.datasource.%s.password" $dbKind) . ) | trim | nindent 12 }} + {{- end -}} + {{- if hasPrefix "BIGTABLE" .Values.versionStoreType }} + {{- if .Values.bigtable.secret }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /bigtable-nessie/sa_credentials.json + {{- end }} + {{- end -}} + {{- if .Values.authentication.enabled -}} + {{- include "nessie.secretToEnv" (list .Values.authentication.oidcClientSecret "key" "quarkus.oidc.credentials.secret" . ) | trim | nindent 12 -}} + {{- include "nessie.secretToEnv" (list .Values.authentication.oidcClientSecret "key" "quarkus.oidc.ui-app.credentials.secret" . ) | trim | nindent 12 -}} + {{- end -}} + {{- if .Values.catalog.enabled -}} + {{- include "nessie.catalogStorageEnv" . | trim | nindent 12 -}} + {{- end -}} + {{- if .Values.extraEnv }} + {{- tpl (toYaml .Values.extraEnv) . | nindent 12 }} + {{- end }} + {{- include "nessie.containerPorts" . 
| trim | nindent 10 }} + livenessProbe: + httpGet: + path: /q/health/live + port: {{ .Values.managementService.portName }} + scheme: HTTP + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + terminationGracePeriodSeconds: {{ .Values.livenessProbe.terminationGracePeriodSeconds }} + readinessProbe: + httpGet: + path: /q/health/ready + port: {{ .Values.managementService.portName }} + scheme: HTTP + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + resources: + {{- tpl (toYaml .Values.resources) . | nindent 12 }} + volumes: + - name: nessie-config + configMap: + name: {{ include "nessie.fullname" . }} + - name: temp-dir + emptyDir: {} + {{- if hasPrefix "ROCKSDB" .Values.versionStoreType }} + - name: rocks-storage + persistentVolumeClaim: + claimName: {{ include "nessie.fullname" . }} + {{- end }} + {{- if .Values.log.file.enabled }} + - name: logs-storage + persistentVolumeClaim: + claimName: {{ include "nessie.fullname" . }}-logs + {{- end }} + {{- if and (hasPrefix "BIGTABLE" .Values.versionStoreType) (.Values.bigtable.secret) }} + - name: bigtable-creds + secret: + secretName: {{ .Values.bigtable.secret.name }} + items: + - key: {{ .Values.bigtable.secret.key }} + path: sa_credentials.json + {{- end }} + {{- if .Values.extraVolumes }} + {{- tpl (toYaml .Values.extraVolumes) . | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: + {{- tpl (toYaml .Values.nodeSelector) . 
| nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: + {{- tpl (toYaml .Values.affinity) . | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: + {{- tpl (toYaml .Values.tolerations) . | nindent 8 }} + {{- end }} diff --git a/addons/nessie/0.103/chart/nessie/templates/hpa.yaml b/addons/nessie/0.103/chart/nessie/templates/hpa.yaml new file mode 100644 index 00000000..16560081 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/hpa.yaml @@ -0,0 +1,65 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +{{- if .Values.autoscaling.enabled }} +{{- if .Capabilities.APIVersions.Has "autoscaling/v2" -}} +apiVersion: autoscaling/v2 +{{- else if .Capabilities.APIVersions.Has "autoscaling/v2beta2" -}} +apiVersion: autoscaling/v2beta2 +{{- else -}} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "nessie.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "nessie.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if or (.Capabilities.APIVersions.Has "autoscaling/v2") (.Capabilities.APIVersions.Has "autoscaling/v2beta2") }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if or (.Capabilities.APIVersions.Has "autoscaling/v2") (.Capabilities.APIVersions.Has "autoscaling/v2beta2") }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/nessie/0.103/chart/nessie/templates/ingress.yaml b/addons/nessie/0.103/chart/nessie/templates/ingress.yaml new file mode 100644 index 00000000..5e782865 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/ingress.yaml @@ -0,0 +1,75 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +**/}} + +{{- $kubeVersion := .Capabilities.KubeVersion.Version -}} +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "nessie.fullname" . -}} +{{- if semverCompare ">=1.22-0" $kubeVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" $kubeVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className | quote }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + {{- $portName := dig "service" "portName" (get (first $.Values.service.ports) "name") . }} + {{- $svcName := printf "%s%s" $fullName (dig "service" "nameSuffix" "" .) }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + pathType: {{ default "ImplementationSpecific" $.Values.ingress.pathType }} + backend: + {{- if semverCompare ">=1.22-0" $kubeVersion }} + service: + name: {{ $svcName }} + port: + name: {{ $portName }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $portName }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} diff --git a/addons/nessie/0.103/chart/nessie/templates/secrets.yaml b/addons/nessie/0.103/chart/nessie/templates/secrets.yaml new file mode 100644 index 00000000..6c46c6a6 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/secrets.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "nessie.fullname" . 
}}-datasource-creds + namespace: {{ .Release.Namespace }} +type: Opaque +stringData: + username: "{{ .Values.jdbc.username }}" + password: "{{ .Values.jdbc.password }}" +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "nessie.fullname" . }}-awscreds + namespace: {{ .Release.Namespace }} +type: Opaque +stringData: + aws_access_key_id: "{{ .Values.catalog.storage.s3.defaultOptions.awsAccessKeyId }}" + aws_secret_access_key: "{{ .Values.catalog.storage.s3.defaultOptions.awsSecretAccessKey }}" diff --git a/addons/nessie/0.103/chart/nessie/templates/service.yaml b/addons/nessie/0.103/chart/nessie/templates/service.yaml new file mode 100644 index 00000000..b3be6eb9 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/service.yaml @@ -0,0 +1,118 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nessie.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . | nindent 4 }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + selector: + {{- include "nessie.selectorLabels" . 
| nindent 4 }} + ports: + {{- range .Values.service.ports }} + - name: {{ .name }} + port: {{ .number }} + targetPort: {{ .number }} + protocol: TCP + {{- end }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} + internalTrafficPolicy: {{ .Values.service.internalTrafficPolicy }} + {{- if ge (int $.Capabilities.KubeVersion.Minor) 31 }} + trafficDistribution: {{ .Values.service.trafficDistribution }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nessie.fullname" . }}-mgmt + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . | nindent 4 }} + {{- with .Values.managementService.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + selector: + {{- include "nessie.selectorLabels" . | nindent 4 }} + ports: + - name: {{ .Values.managementService.portName }} + port: {{ .Values.managementService.portNumber }} + targetPort: {{ .Values.managementService.portNumber }} + protocol: TCP + publishNotReadyAddresses: true +{{- range $i, $svc := .Values.extraServices }} +{{- if not $svc.nameSuffix }} +{{- fail (printf "extraServices[%d]: missing required nameSuffix" $i) }} +{{- else if eq $svc.nameSuffix "-mgmt" }} +{{- fail (printf "extraServices[%d]: invalid nameSuffix" $i) }} +{{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nessie.fullname" $ }}{{ $svc.nameSuffix }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "nessie.labels" $ | nindent 4 }} + {{- with $svc.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ $svc.type }} + selector: + {{- include "nessie.selectorLabels" $ | nindent 4 }} + ports: + {{- range $svc.ports }} + - name: {{ .name }} + port: {{ .number }} + targetPort: {{ .number }} + protocol: TCP + {{- end }} + {{- if $svc.sessionAffinity }} + sessionAffinity: {{ $svc.sessionAffinity }} + {{- end }} + {{- if $svc.clusterIP }} + clusterIP: {{ $svc.clusterIP }} + {{- end }} + {{- if and $svc.externalTrafficPolicy (or (eq $svc.type "LoadBalancer") (eq $svc.type "NodePort")) }} + externalTrafficPolicy: {{ $svc.externalTrafficPolicy }} + {{- end }} + {{- if $svc.internalTrafficPolicy }} + internalTrafficPolicy: {{ $svc.internalTrafficPolicy }} + {{- end }} + {{- if and (ge (int $.Capabilities.KubeVersion.Minor) 31) ($svc.trafficDistribution) }} + trafficDistribution: {{ $svc.trafficDistribution }} + {{- end }} +{{- end }} diff --git a/addons/nessie/0.103/chart/nessie/templates/serviceaccount.yaml b/addons/nessie/0.103/chart/nessie/templates/serviceaccount.yaml new file mode 100644 index 00000000..5bd0ceda --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/serviceaccount.yaml @@ -0,0 +1,31 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "nessie.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . 
| nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/addons/nessie/0.103/chart/nessie/templates/servicemonitor.yaml b/addons/nessie/0.103/chart/nessie/templates/servicemonitor.yaml
new file mode 100644
index 00000000..f60b2a6f
--- /dev/null
+++ b/addons/nessie/0.103/chart/nessie/templates/servicemonitor.yaml
@@ -0,0 +1,49 @@
+{{- if false}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "nessie.fullname" . }}
+  labels:
+    {{- include "nessie.labels" . | nindent 4 }}
+    {{- with .Values.metrics.serviceMonitor.additionalLabels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  endpoints:
+    - port: {{ .Values.managementService.portName | default "metrics" }}
+      path: /q/metrics
+      interval: 30s
+  selector:
+    matchLabels:
+      {{- include "nessie.selectorLabels" . | nindent 6 }}
+{{- end }}
+{{- if .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "nessie.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "nessie.labels" . | nindent 4 }}
+    {{- if .Values.metrics.serviceMonitor.labels }}
+    {{ toYaml .Values.metrics.serviceMonitor.labels | nindent 4 }}
+    {{- end }}
+spec:
+  endpoints:
+    - port: {{ .Values.managementService.portName }}
+      scheme: http
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      path: /q/metrics
+      {{- with .Values.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+  selector:
+    matchLabels:
+      {{- include "nessie.selectorLabels" .
| nindent 6 }} +{{- end }} diff --git a/addons/nessie/0.103/chart/nessie/templates/storage.yaml b/addons/nessie/0.103/chart/nessie/templates/storage.yaml new file mode 100644 index 00000000..7147f364 --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/templates/storage.yaml @@ -0,0 +1,63 @@ +{{/** + + Copyright (C) 2024 Dremio + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +**/}} + +{{- if or (eq .Values.versionStoreType "ROCKS") (eq .Values.versionStoreType "ROCKSDB") }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "nessie.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: {{ .Values.rocksdb.storageClassName }} + resources: + requests: + storage: "{{ .Values.rocksdb.storageSize }}" +{{- if .Values.rocksdb.selectorLabels }} + selector: + matchLabels: + {{- toYaml .Values.rocksdb.selectorLabels | nindent 6 }} +{{- end }} +{{- end }} +--- +{{- if .Values.log.file.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "nessie.fullname" . }}-logs + namespace: {{ .Release.Namespace }} + labels: + {{- include "nessie.labels" . 
| nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: {{ .Values.log.file.storage.className }} + resources: + requests: + storage: "{{ .Values.log.file.storage.size }}" +{{- if .Values.log.file.storage.selectorLabels }} + selector: + matchLabels: + {{- toYaml .Values.log.file.storage.selectorLabels | nindent 6 }} +{{- end }} +{{- end }} diff --git a/addons/nessie/0.103/chart/nessie/values.yaml b/addons/nessie/0.103/chart/nessie/values.yaml new file mode 100644 index 00000000..8db1f8ca --- /dev/null +++ b/addons/nessie/0.103/chart/nessie/values.yaml @@ -0,0 +1,987 @@ +## +## Copyright (C) 2024 Dremio +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +# -- The number of replicas to deploy (horizontal scaling). +# Beware that replicas are stateless; don't set this number > 1 when using IN_MEMORY or ROCKSDB version store types. +replicaCount: 2 + +image: + # -- The image repository to pull from. + repository: registry.drycc.cc/drycc-addons/nessie + # -- The image pull policy. + pullPolicy: IfNotPresent + # -- Overrides the image tag whose default is the chart version. + tag: "0" + # -- The path to the directory where the application.properties file should be mounted. + configDir: /opt/drycc/nessie/config + +# -- References to secrets in the same namespace to use for pulling any of the images used by this +# chart. Each entry is a LocalObjectReference to an existing secret in the namespace. 
The secret +# must contain a .dockerconfigjson key with a base64-encoded Docker configuration file. See +# https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ for more +# information. +imagePullSecrets: [] +# - name: registry-creds + +# -- Logging configuration. +log: + # -- The log level of the root category, which is used as the default log level for all categories. + level: INFO + # -- Configuration for the console appender. + console: + # -- Whether to enable the console appender. + enabled: true + # -- The log level of the console appender. + threshold: ALL + # -- Whether to log in JSON format. + json: false + # -- The log format to use. Ignored if JSON format is enabled. See + # https://quarkus.io/guides/logging#logging-format for details. + format: "%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%c{3.}] (%t) %s%e%n" + # -- Configuration for the file appender. + file: + # -- Whether to enable the file appender. + enabled: false + # -- The log level of the file appender. + threshold: ALL + # -- Whether to log in JSON format. + json: false + # -- The log format to use. Ignored if JSON format is enabled. See + # https://quarkus.io/guides/logging#logging-format for details. + format: "%d{yyyy-MM-dd HH:mm:ss,SSS} %h %N[%i] %-5p [%X{traceId},%X{spanId},%X{sampled}] [%c{3.}] (%t) %s%e%n" + # -- The local directory where log files are stored. The persistent volume claim will be mounted + # here. + logsDir: /deployments/logs + # -- The log file name. + fileName: nessie.log + # -- Log rotation configuration. + rotation: + # -- The maximum size of the log file before it is rotated. Should be expressed as a Kubernetes quantity. + maxFileSize: 100Mi + # -- The maximum number of backup files to keep. + maxBackupIndex: 5 + # -- An optional suffix to append to the rotated log files. If present, the rotated log files + # will be grouped in time buckets, and each bucket will contain at most maxBackupIndex files. 
+ # The suffix must be in a date-time format that is understood by DateTimeFormatter. If the + # suffix ends with .gz or .zip, the rotated files will also be compressed using the + # corresponding algorithm. + fileSuffix: ~ # .yyyy-MM-dd.gz + # -- The log storage configuration. A persistent volume claim will be created using these + # settings. + storage: + # -- The storage class name of the persistent volume claim to create. + className: standard + # -- The size of the persistent volume claim to create. + size: 512Gi + # -- Labels to add to the persistent volume claim spec selector; a persistent volume with + # matching labels must exist. Leave empty if using dynamic provisioning. + selectorLabels: {} + # app.kubernetes.io/name: nessie + # app.kubernetes.io/instance: RELEASE-NAME + # -- Configuration for the Sentry appender. See https://sentry.io and + # https://docs.quarkiverse.io/quarkus-logging-sentry/dev for more information. + sentry: + # -- Whether to enable the Sentry appender. + enabled: false + # -- The Sentry DSN. Required. + dsn: ~ # "https://abcd@sentry.io/1234" + # -- The log level of the Sentry appender. + level: ERROR + # -- The environment to report to Sentry. Optional. + environment: ~ + # -- The release version to report to Sentry. Optional. + release: ~ + # -- Package prefixes that belong to your application. + inAppPackages: + - org.projectnessie + # -- Configuration for specific log categories. + categories: + org.projectnessie: INFO + # Useful to debug configuration issues: + # io.smallrye.config: DEBUG + +# -- Which type of version store to use: IN_MEMORY, ROCKSDB, DYNAMODB2, MONGODB2, CASSANDRA2, JDBC2, BIGTABLE. +# Note: the version store type JDBC is deprecated, please use the Nessie Server Admin Tool to migrate to JDBC2. +# Note: the version store type CASSANDRA is deprecated, please use the Nessie Server Admin Tool to migrate to CASSANDRA2. 
+# Note: the version store type DYNAMODB is deprecated, please use the Nessie Server Admin Tool to migrate to DYNAMODB2. +# Note: the version store type MONGODB is deprecated, please use the Nessie Server Admin Tool to migrate to MONGODB2. +versionStoreType: JDBC2 + +# Cassandra settings. Only required when using CASSANDRA version store type; ignored otherwise. +cassandra: + keyspace: nessie + # -- The contact points for the Cassandra cluster. At least one contact point must be provided, + # but more can be added for redundancy. The format is a comma-separated list of host:port elements. + contactPoints: cassandra.cassandra.svc.cluster.local:9042 + localDatacenter: datacenter1 + secret: + # -- The secret name to pull Cassandra credentials from. + name: cassandra-creds + # -- The secret key storing the Cassandra username. + username: cassandra_username + # -- The secret key storing the Cassandra password. + password: cassandra_password + +# RocksDB settings. Only required when using ROCKSDB version store type; ignored otherwise. +rocksdb: + # -- The storage class name of the persistent volume claim to create. + storageClassName: standard + # -- The size of the persistent volume claim to create. + storageSize: 1Gi + # -- Labels to add to the persistent volume claim spec selector; a persistent volume with matching labels must exist. + # Leave empty if using dynamic provisioning. + selectorLabels: + {} + # app.kubernetes.io/name: nessie + # app.kubernetes.io/instance: RELEASE-NAME + +# DynamoDB settings. Only required when using DYNAMODB version store type; ignored otherwise. +dynamodb: + # -- The AWS region to use. + region: us-west-2 + # -- The name of the profile that should be used, when loading AWS credentials from a profile + # file. Required only if no secret is provided below. + profile: default + secret: + # -- The secret name to pull AWS credentials from. Optional; if not present, the default AWS + # credentials provider chain is used. 
+ name: awscreds + # -- The secret key storing the AWS secret key id. + awsAccessKeyId: aws_access_key_id + # -- The secret key storing the AWS secret access key. + awsSecretAccessKey: aws_secret_access_key + +## Mongo DB settings. Only required when using MONGODB version store type; ignored otherwise. +mongodb: + # -- The MongoDB database name. + name: nessie + # -- The MongoDB connection string. + connectionString: mongodb://localhost:27017 + secret: + # -- The secret name to pull MongoDB credentials from. + name: mongodb-creds + # -- The secret key storing the MongoDB username. + username: mongodb_username + # -- The secret key storing the MongoDB password. + password: mongodb_password + +# JDBC datasource settings. Only required when using JDBC version store type; ignored otherwise. +jdbc: + # -- The JDBC connection string. If you are using Nessie OSS images, then only + # PostgreSQL, MariaDB and MySQL URLs are supported. Check your JDBC driver documentation + # for the correct URL format. + jdbcUrl: jdbc:postgresql://localhost:5432/my_database?currentSchema=nessie + secret: + # -- The secret name to pull datasource credentials from. + name: datasource-creds + # -- The secret key storing the datasource username. + username: username + # -- The secret key storing the datasource password. + password: password + +# BigTable settings. Only required when using BIGTABLE version store type; ignored otherwise. +bigtable: + # -- The Google Cloud project ID. + projectId: my-gcp-project + # -- The Google Cloud Bigtable instance ID. + instanceId: nessie-bigtable + # -- The Google Cloud Bigtable app profile ID. + appProfileId: default + # -- The secret to use to authenticate against BigTable. + # When provided, it is assumed that authentication will use a service account JSON key. + # See https://cloud.google.com/iam/docs/keys-create-delete for details on how to create a + # service account key. 
+ # If left empty, then Workload Identity usage is assumed instead; in this case, make sure that + # the pod's service account has been granted access to BigTable. + # See https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#authenticating_to + # for details on how to create a suitable service account. + # Important: when using Workload Identity, unless the cluster is in Autopilot mode, it is also + # required to add the following nodeSelector label: + # iam.gke.io/gke-metadata-server-enabled: "true" + # This is not done automatically by this chart because this selector would be invalid for + # Autopilot clusters. + secret: {} + # # -- The secret name to pull a valid Google Cloud service account key from. + # name: bigtable-creds + # # -- The secret key storing the Google Cloud service account JSON key. + # key: sa_json + +# -- The Nessie catalog server configuration. +catalog: + + # -- Whether to enable the REST catalog service. + enabled: true + + # -- Iceberg catalog settings. + iceberg: + + # -- The default warehouse name. Required. This is just a symbolic name; it must refer to a + # declared warehouse below. + defaultWarehouse: warehouse1 + + # -- Iceberg config defaults applicable to all clients and warehouses. Any properties that are + # common to all iceberg clients should be included here. They will be passed to all clients on + # all warehouses as config defaults. These defaults can be overridden on a per-warehouse basis, + # see below. + configDefaults: {} + # io-impl: org.apache.iceberg.hadoop.HadoopFileIO + + # -- Iceberg config overrides applicable to all clients and warehouses. Any properties that are + # common to all iceberg clients should be included here. They will be passed to all clients on + # all warehouses as config overrides. These overrides can be overridden on a per-warehouse + # basis, see below. + configOverrides: {} + # s3.acl: public-read-write + + # -- Iceberg warehouses. 
Each warehouse is a location where Iceberg tables are stored. Each + # warehouse has a name, a location, and optional config defaults and overrides. At least one + # warehouse must be defined. + warehouses: + # -- Symbolic name of the warehouse. Required. + - name: warehouse1 + # -- Location of the warehouse. Required. Used to determine the base location of a table. + # Scheme must be either s3 (Amazon S3), gs (Google GCS) or abfs / abfss (Azure ADLS). Storage + # properties for each location can be defined below. + location: s3://bucket1/ + # -- Iceberg config defaults specific to this warehouse. They override any defaults specified + # above in catalog.iceberg.configDefaults. + configDefaults: {} + # -- Iceberg config overrides specific to this warehouse. They override any defaults specified + # above in catalog.iceberg.configOverrides. + configOverrides: {} + # In rare cases it might be legit to turn off the object-stores readiness check. + objectStoresHealthCheckEnabled: true + + # -- Catalog storage settings. + storage: + # -- Interval after which a request is retried when Storage responds with some "retry later" + # error. Must be a valid ISO duration. + retryAfter: ~ + + s3: + + # Global S3 settings. Can be overridden on a per-bucket basis below. + defaultOptions: + # -- DNS name of the region, required for AWS. + region: us-west-2 + # -- Endpoint URI, required for private clouds. Optional; if not provided, the default is + # used. + endpoint: "https://bucket1.s3.amazonaws.com" + # -- Endpoint URI, required for private clouds. Optional; if not provided, the default is + # used. If the endpoint URIs for the Nessie server and clients differ, this one defines the + # endpoint used for the Nessie server. + externalEndpoint: ~ + # -- Whether to use path-style access. Optional; if not provided, the default is used. If + # true, path-style access will be used, as in: https:///. If false, a + # virtual-hosted style will be used instead, as in: https://.. 
+ pathStyleAccess: true + # -- AWS Access point for this bucket. Access points can be used to perform S3 operations by + # specifying a mapping of bucket to access points. This is useful for multi-region access, + # cross-region access, disaster recovery, etc. See + # https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html. + accessPoint: ~ + # -- Authorize cross-region calls when contacting an access point. The default is false. + allowCrossRegionAccessPoint: ~ + # -- Controls the authentication mode for the Catalog server. Valid values are: + # - APPLICATION_GLOBAL: Use the default AWS credentials provider chain. + # - STATIC: Static credentials provided through the accessKeySecret option. + # The default is STATIC. + authType: ~ # STATIC + # --Optional parameter to disable S3 request signing. Default is to enable S3 request signing. + requestSigningEnabled: ~ # true + # -- The STS endpoint. Optional; if not provided, the default is used. This parameter must + # be set if the cloud provider is not AMAZON and the catalog is configured to use S3 + # sessions (e.g. to use the "assume role" functionality). + stsEndpoint: ~ # "https://sts.amazonaws.com" + + clientIam: + # -- Whether to enable vended credentials functionality. If this option is enabled, the + # server will temporarily assume the configured role, then pass the returned session + # credentials down to the client, for each table that is created, updated or registered. + # Vended credentials are not cached server-side. + enabled: ~ # false + # -- The ARN of the role to assume for accessing S3 data. This parameter is required for + # Amazon S3, but may not be required for other storage providers (e.g. Minio does not use it + # at all). + roleArn: ~ # "arn:aws:iam::123456789012:role/role-name" + # -- The IAM policy in JSON format to be used as an inline session policy when calling the + # assume-role endpoint. Optional. 
+ policy: ~ # "{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": \"s3:*\", \"Resource\": \"*\" } ] }" + # -- An identifier for the assumed role session. This parameter is most important in cases + # when the same role is assumed by different principals in different use cases. + roleSessionName: ~ # nessie + # -- An identifier for the party assuming the role. This parameter must match the external + # ID configured in IAM rules that govern the assume role process for the specified roleArn. + externalId: ~ + # -- A higher bound estimate of the expected duration of client "sessions" working with data in + # this bucket. A session, for example, is the lifetime of an Iceberg REST catalog object on + # the client side. This value is used for validating expiration times of credentials + # associated with the warehouse. If unset, a default of one hour is assumed. + sessionDuration: ~ + # -- Additional IAM policy statements in JSON format to add to generated per-table IAM policies. + statements: ~ + # - >- + # { + # "Effect": "Allow", + # "Action": "s3:GetObject", + # "Resource": "arn:aws:s3:::bucket1.{{ .Release.Namespace }}/*" + # } + # - >- + # { + # "Effect": "Allow", + # "Action": "s3:PutObject", + # "Resource": "arn:aws:s3:::bucket1.{{ .Release.Namespace }}/*" + # } + + # -- Settings only relevant when clientAuthenticationMode is ASSUME_ROLE. + serverIam: + # -- Whether to enable server assume-role functionality. If this option is enabled, the + # server will attempt to assume the configured role at startup and cache the returned + # session credentials. + enabled: ~ # false + # -- The ARN of the role to assume for accessing S3 data. This parameter is required for + # Amazon S3, but may not be required for other storage providers (e.g. Minio does not use it + # at all). 
+ roleArn: ~ # "arn:aws:iam::123456789012:role/role-name" + # -- The IAM policy in JSON format to be used as an inline session policy when calling the + # assume-role endpoint. Optional. + policy: ~ # "{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": \"s3:*\", \"Resource\": \"*\" } ] }" + # -- An identifier for the assumed role session. This parameter is most important in cases + # when the same role is assumed by different principals in different use cases. + roleSessionName: ~ # nessie + # -- An identifier for the party assuming the role. This parameter must match the external + # ID configured in IAM rules that govern the assume role process for the specified roleArn. + externalId: ~ + # -- A higher bound estimate of the expected duration of client "sessions" working with data in + # this bucket. A session, for example, is the lifetime of an Iceberg REST catalog object on + # the client side. This value is used for validating expiration times of credentials + # associated with the warehouse. If unset, a default of one hour is assumed. + sessionDuration: ~ + + # -- AWS credentials. Required when serverAuthenticationMode is STATIC. + accessKeySecret: + # -- The secret name to pull AWS credentials from. + name: awscreds + + # -- The secret key storing the AWS secret key id. + awsAccessKeyId: aws_access_key_id + # -- The secret key storing the AWS secret access key. + awsSecretAccessKey: aws_secret_access_key + + # -- Per-bucket S3 settings. Override the general settings above. + buckets: [] + # - name: bucket1 + # authority: bucket1 + # pathPrefix: path/in/the/bucket + # endpoint: "https://bucket1.s3.amazonaws.com" + # accessKeySecret: + # name: awscreds + # awsAccessKeyId: aws_access_key_id + # awsSecretAccessKey: aws_secret_access_key + + # -- S3 transport settings. Not overridable on a per-bucket basis. + transport: + # -- Override the default maximum number of pooled connections. 
+ maxHttpConnections: ~ + # -- Override the default connection read timeout. Must be a valid ISO duration. + readTimeout: ~ + # -- Override the default TCP connect timeout. Must be a valid ISO duration. + connectTimeout: ~ + # -- Override default connection acquisition timeout. This is the time a request will wait + # for a connection from the pool. Must be a valid ISO duration. + connectionAcquisitionTimeout: ~ + # -- Override default max idle time of a pooled connection. Must be a valid ISO duration. + connectionMaxIdleTime: ~ + # -- Override default time-time of a pooled connection. Must be a valid ISO duration. + connectionTimeToLive: ~ + # -- Override default behavior whether to expect an HTTP/100-Continue. Must be a valid ISO + # duration. + expectContinueEnabled: ~ + + sessionCredentials: + # -- The time period to subtract from the S3 session credentials (assumed role credentials) + # expiry time to define the time when those credentials become eligible for refreshing. + # Not overridable on a per-bucket basis. The default is PT5M (5 minutes). + sessionCredentialRefreshGracePeriod: ~ # PT5M + # -- Maximum number of entries to keep in the session credentials cache (assumed role + # credentials). Not overridable on a per-bucket basis. The default is 1000. + sessionCredentialCacheMaxEntries: ~ # 1000 + # -- Maximum number of entries to keep in the STS clients cache. Not overridable on a + # per-bucket basis. The default is 50. + stsClientsCacheMaxEntries: ~ # 50 + + gcs: + + # Global GCS settings. Can be overridden on a per-bucket basis below. + defaultOptions: + # -- The default endpoint override to use. The endpoint is almost always used for testing + # purposes. If the endpoint URIs for the Nessie server and clients differ, this one defines + # the endpoint used for the Nessie server. 
+ host: ~ + # -- When using a specific endpoint, see host, and the endpoint URIs for the Nessie server + # differ, you can specify the URI passed down to clients using this setting. Otherwise, + # clients will receive the value from the host setting. + externalHost: ~ + # -- Optionally specify the user project (Google term). + userProject: ~ + # -- The Google project ID. + projectId: ~ + # -- The Google quota project ID. + quotaProjectId: ~ + # -- The Google client lib token. + clientLibToken: ~ + # -- The authentication type to use. Valid values are: NONE, USER, SERVICE_ACCOUNT, + # ACCESS_TOKEN, APPLICATION_DEFAULT. The default is NONE. + authType: ~ + + # -- The Google Cloud service account key secret. This is required when authType is USER or + # SERVICE_ACCOUNT. + authCredentialsJsonSecret: + # -- The secret name to pull a valid Google Cloud service account key from. + name: ~ + # -- The secret key storing the Google Cloud service account JSON key. + key: ~ + + # -- The oauth2 token secret. This is required when authType is ACCESS_TOKEN. + oauth2TokenSecret: + # # -- The secret name to pull a valid Google Cloud service account key from. + name: ~ + # # -- The secret key storing the token. + token: ~ + # # -- The secret key storing the token's expiresAt value (optional). + expiresAt: ~ + + # -- Customer-supplied AES256 key for blob encryption when writing. Currently unsupported. + encryptionKey: ~ + # -- Customer-supplied AES256 key for blob decryption when reading. Currently unsupported. + decryptionKey: ~ + + # -- The read chunk size in bytes. Must be a valid ISO duration. + readChunkSize: ~ + # -- The write chunk size in bytes. Must be a valid ISO duration. + writeChunkSize: ~ + # -- The delete batch size. + deleteBatchSize: ~ + + # -- Per-bucket GCS settings. Override the general settings above. 
+ buckets: [] + # - name: bucket1 + # authority: bucket1 + # pathPrefix: path/in/the/bucket + # authType: ACCESS_TOKEN + # oauth2TokenSecret: + # name: gcs-creds + # key: token + # expiresAt: expiresAt + + # -- GCS transport settings. Not overridable on a per-bucket basis. + transport: + # -- Override the default maximum number of attempts. + maxAttempts: ~ + # -- Override the default connection timeout. Must be a valid ISO duration. + connectTimeout: ~ + # -- Override the default read timeout. Must be a valid ISO duration. + readTimeout: ~ + # -- Override the default initial retry delay. Must be a valid ISO duration. + initialRetryDelay: ~ + # -- Override the default maximum retry delay. Must be a valid ISO duration. + maxRetryDelay: ~ + # -- Override the default retry delay multiplier. Must be a valid ISO duration. + retryDelayMultiplier: ~ + # -- Override the default initial RPC timeout. Must be a valid ISO duration. + initialRpcTimeout: ~ + # -- Override the default maximum RPC timeout. Must be a valid ISO duration. + maxRpcTimeout: ~ + # -- Override the default RPC timeout multiplier. Must be a valid ISO duration. + rpcTimeoutMultiplier: ~ + # -- Override the default logical request timeout. Must be a valid ISO duration. + logicalTimeout: ~ + # -- Override the default total timeout. Must be a valid ISO duration. + totalTimeout: ~ + + adls: + + # Global ADLS settings. Can be overridden on a per-filesystem basis below. + defaultOptions: + # -- Custom HTTP endpoint. In case clients need to use a different URI, use externalEndpoint. + endpoint: ~ + # -- Custom HTTP endpoint to be used by clients. If not set, the endpoint value is used. + externalEndpoint: ~ + # -- The retry strategy to use. Valid values are: NONE, EXPONENTIAL_BACKOFF, FIXED_DELAY. + # The default is EXPONENTIAL_BACKOFF. + retryPolicy: ~ + # -- The maximum number of retries. Must be a positive integer. Default is 4. Optional. + # Valid if retryPolicy is EXPONENTIAL_BACKOFF or FIXED_DELAY. 
+ maxRetries: ~ + # -- The maximum time allowed before a request is cancelled and assumed failed, default is + # Integer.MAX_VALUE. Optional. Must be a valid ISO duration. Valid if retryPolicy is + # EXPONENTIAL_BACKOFF or FIXED_DELAY. + tryTimeout: ~ + # -- Specifies the amount of delay to use before retrying an operation, default value is + # PT4S (4 seconds) when retryPolicy is EXPONENTIAL_BACKOFF and PT30S (30 seconds) when + # retryPolicy is FIXED_DELAY. Must be a valid ISO duration. + retryDelay: ~ + # -- Specifies the maximum delay allowed before retrying an operation, default value is + # PT120s (120 seconds). Must be a valid ISO duration. Valid if retryPolicy is + # EXPONENTIAL_BACKOFF. + maxRetryDelay: ~ + # -- The authentication type to use. Valid values are: NONE, STORAGE_SHARED_KEY, SAS_TOKEN, + # APPLICATION_DEFAULT. The default is NONE. + authType: ~ + # -- A secret containing the account name and key to use. Required when authType is + # STORAGE_SHARED_KEY. + accountSecret: + # -- Name of the secret containing the account name and key. + name: ~ + # -- Secret key containing the fully-qualified account name, e.g. "myaccount.dfs.core.windows.net". + accountName: ~ + # -- Secret key containing the account key. + accountKey: ~ + # -- A secret containing the SAS token to use. Required when authType is SAS_TOKEN. + sasTokenSecret: + # -- Name of the secret containing the SAS token. + name: ~ + # -- Secret key containing the SAS token. + sasToken: ~ + + # -- Per-filesystem ADLS settings. Override the general settings above. + filesystems: [] + # - name: filesystem1 + # authority: bucket1 + # pathPrefix: path/in/the/bucket + # endpoint: http://localhost/adlsgen2/bucket + # accountSecret: + # name: adls-account-secret + # accountName: accountName + # accountKey: accountKeyRef + + # -- ADLS transport settings. Not overridable on a per-bucket basis. + transport: + # -- The default maximum connection pool size is determined by the underlying HTTP client. 
+ # Not overridable on a per-filesystem basis. + maxHttpConnections: ~ + # -- Sets the connection timeout for a request to be sent. The default is PT10S (10 + # seconds). Must be a valid ISO duration. Not overridable on a per-filesystem basis. + connectTimeout: ~ + # -- Sets the read timeout duration used when reading the server response. The default is + # PT60S (60 seconds). Must be a valid ISO duration. Not overridable on a per-filesystem + # basis. + readTimeout: ~ + # -- Sets the write timeout duration used when writing the request to the server. The + # default is PT60S (60 seconds). Must be a valid ISO duration. Not overridable on a + # per-filesystem basis. + writeTimeout: ~ + # -- Sets the maximum idle time for a connection to be kept alive. The default is PT60S (60 + # seconds). Must be a valid ISO duration. Not overridable on a per-filesystem basis. + connectionIdleTimeout: ~ + # -- The size of each data chunk returned from the service in bytes. The default value is 4 + # MB. Not overridable on a per-filesystem basis. + readBlockSize: ~ + # -- Sets the block size in bytes to transfer at a time. Not overridable on a per-filesystem + # basis. + writeBlockSize: ~ + + # -- Custom ADLS configuration options, see javadocs of com.azure.core.util.Configuration. + # Not overridable on a per-filesystem basis. + advancedConfig: {} + + +# -- Advanced configuration. +# You can pass here any valid Nessie or Quarkus configuration property. +# Any property that is defined here takes precedence over all the other configuration values generated by this chart. +# Properties can be passed "flattened" or as nested YAML objects (see examples below). +advancedConfig: + {} +# Nessie version store settings +# ----------------------------- +# +# See description of the various cache size parameters and their defaults. 
+#
+# nessie.version.store.persist.cache-capacity-mb: (defaults to fractional size, based on max-heap size)
+# nessie.version.store.persist.cache-capacity-fraction-of-heap: 0.7
+# nessie.version.store.persist.cache-capacity-fraction-adjust-mb: 256
+# nessie.version.store.persist.cache-capacity-fraction-min-size-mb: 64
+#
+# nessie.server.default-branch: my-branch
+#
+# nessie.version.store.persist.repository-id: my-repository
+#
+# Reverse Proxy Settings
+# ----------------------
+#
+# These config options are mentioned only for documentation purposes. Consult the
+# Quarkus documentation for "Running behind a reverse proxy" and configure those
+# depending on your actual needs.
+#
+# See https://quarkus.io/guides/http-reference#reverse-proxy
+#
+# Do NOT enable these options unless your reverse proxy (for example istio or nginx)
+# is properly set up to set these headers but also filter those from incoming requests.
+#
+# quarkus:
+#   http:
+#     proxy:
+#       proxy-address-forwarding: "true"
+#       allow-x-forwarded: "true"
+#       enable-forwarded-host: "true"
+#       enable-forwarded-prefix: "true"
+#       trusted-proxies: "127.0.0.1"
+
+# -- Advanced configuration via Environment Variables.
+# Extra environment variables to add to the Nessie server container.
+# You can pass here any valid EnvVar object:
+# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#envvar-v1-core
+# This can be useful to get configuration values from Kubernetes secrets or config maps.
+extraEnv:
+  []
+# - name: QUARKUS_MONGODB_APPLICATION_NAME
+#   value: my-app
+# - name: QUARKUS_MONGODB_TLS
+#   valueFrom:
+#     configMapKeyRef:
+#       name: mongodb-config
+#       key: tls
+
+authentication:
+  # -- Specifies whether authentication for the nessie server should be enabled.
+  enabled: false
+  # -- Sets the base URL of the OpenID Connect (OIDC) server. Required if authentication is enabled (unless local token introspection is enforced through advanced configuration).
+ oidcAuthServerUrl: ~ # http://example.com:8080/auth/realms/master + # -- Set the OIDC client ID. If Nessie must contact the OIDC server, this is the client ID that will be used to identify the application. + oidcClientId: nessie + # -- Set the OIDC client secret. Whether the client secret is required depends on the OIDC server configuration. + # For Keycloak, the client secret is generally not required as the returned tokens can be introspected locally by Nessie. + # If token introspection requires a round-trip to the OIDC server, the client secret is required. + oidcClientSecret: {} +# name: nessie-oidc-creds +# key: client-secret + +authorization: + # -- Specifies whether authorization for the nessie server should be enabled. + enabled: false + # -- The authorization rules when authorization.enabled=true. Example rules can be found at https://projectnessie.org/features/metadata_authorization/#authorization-rules + rules: + {} + # allowViewingBranch: op=='VIEW_REFERENCE' && role.startsWith('test_user') && ref.startsWith('allowedBranch') + # allowCommits: op=='COMMIT_CHANGE_AGAINST_REFERENCE' && role.startsWith('test_user') && ref.startsWith('allowedBranch') + +tracing: + # -- Specifies whether tracing for the nessie server should be enabled. + enabled: false + # -- The collector endpoint URL to connect to (required). + # The endpoint URL must have either the http:// or the https:// scheme. + # The collector must talk the OpenTelemetry protocol (OTLP) and the port must be its gRPC port (by default 4317). + # See https://quarkus.io/guides/opentelemetry for more information. + endpoint: "http://otlp-collector:4317" + # -- Which requests should be sampled. Valid values are: "all", "none", or a ratio between 0.0 and + # "1.0d" (inclusive). E.g. "0.5d" means that 50% of the requests will be sampled. + sample: "1.0d" + # -- Resource attributes to identify the nessie service among other tracing sources. 
+ # See https://opentelemetry.io/docs/reference/specification/resource/semantic_conventions/#service. + # If left empty, traces will be attached to a service named "Nessie"; to change this, provide a service.name attribute here. + attributes: + {} + # service.name: my-nessie + +metrics: + # -- Specifies whether metrics for the nessie server should be enabled. + enabled: true + # -- Additional tags (dimensional labels) to add to the metrics. + tags: + {} + # service: nessie + # environment: production + + serviceMonitor: + # -- Specifies whether a ServiceMonitor for Prometheus operator should be created. + enabled: true + # -- The scrape interval; leave empty to let Prometheus decide. Must be a valid duration, e.g. 1d, 1h30m, 5m, 10s. + interval: "" + # -- Labels for the created ServiceMonitor so that Prometheus operator can properly pick it up. + labels: + {} + # release: prometheus + # -- Additional labels to add to the ServiceMonitor + additionalLabels: {} + # -- Relabeling rules to apply to metrics. Ref https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config. + metricRelabelings: [] + # - source_labels: [ __meta_kubernetes_namespace ] + # separator: ; + # regex: (.*) + # target_label: namespace + # replacement: $1 + # action: replace + +serviceAccount: + # -- Specifies whether a service account should be created. + create: true + # -- Annotations to add to the service account. + annotations: {} + # -- The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template. + name: "" + +# -- Annotations to apply to nessie pods. +podAnnotations: {} + +# -- Additional Labels to apply to nessie pods. +podLabels: {} + +# -- Additional Labels to apply to nessie configmap. +configMapLabels: {} + +# -- Security context for the nessie pod. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/. 
+podSecurityContext: + # GID 10001 is compatible with Nessie OSS default images starting with 0.95.1; change this if you + # are using a different image. + fsGroup: 10001 + seccompProfile: + type: RuntimeDefault + +# -- Security context for the nessie container. See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/. +securityContext: + # UID 10000 and GID 10001 are compatible with Nessie OSS default images starting with 0.95.1; + # change this if you are using a different image. + runAsUser: 10000 + runAsGroup: 10001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + +# -- Nessie main service settings. +service: + # -- The type of service to create. + type: ClusterIP + # -- The ports the service will listen on. + # At least one port is required; the first port implicitly becomes the HTTP port that the + # application will use for serving API requests. By default, it's 19120. + # Note: port names must be unique and no more than 15 characters long. + ports: + - name: nessie-http + number: 19120 + # - name: nessie-https + # number: 19121 + # -- The session affinity for the service. Valid values are: None, ClientIP. + # ClientIP enables sticky sessions based on the client's IP address. + # This is generally beneficial to Nessie deployments, but some testing may be + # required in order to make sure that the load is distributed evenly among the pods. + # Also, this setting affects only internal clients, not external ones. + # If Ingress is enabled, it is recommended to set sessionAffinity to None. + sessionAffinity: None + # -- You can specify your own cluster IP address + # If you define a Service that has the .spec.clusterIP set to "None" then Kubernetes does not assign an IP address. + # Instead, DNS records for the service will return the IP addresses of each pod targeted by the server. This is + # called a headless service. 
+ # See https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: "" + # -- The traffic policy fields control how traffic from internal and external sources are routed respectively. + # Valid values are Cluster and Local. + # Set the field to Cluster to route traffic to all ready endpoints. + # Set the field to Local to only route to ready node-local endpoints. + # If the traffic policy is Local and there are no node-local endpoints, traffic is dropped by kube-proxy + internalTrafficPolicy: Cluster + externalTrafficPolicy: Cluster + # -- The traffic distribution field provides another way to influence traffic routing within a Kubernetes Service. + # While traffic policies focus on strict semantic guarantees, traffic distribution allows you to express preferences + # such as routing to topologically closer endpoints. + # Valid values are: PreferClose + trafficDistribution: PreferClose + # -- Annotations to add to the service. + annotations: {} + +# -- Management service settings. These settings are used to configure liveness and readiness probes, +# and to configure the dedicated headless service that will expose health checks and metrics, e.g. +# for metrics scraping and service monitoring. +managementService: + # -- The name of the management port. Required. + portName: nessie-mgmt + # -- The port the management service listens on. By default, the management interface is exposed + # on HTTP port 9000. + portNumber: 9000 + # -- Annotations to add to the service. + annotations: {} + +# -- Additional service definitions. All service definitions always select all Nessie pods. Use +# this if you need to expose specific ports with different configurations. +extraServices: [] + # - # -- The suffix to append to the service name. Required. + # nameSuffix: "-ext" + # # -- The type of service to create. + # type: LoadBalancer + # # -- The ports the service will listen on. 
+ # ports: + # - name: nessie-http + # number: 19120 + # - name: nessie-https + # number: 19121 + # sessionAffinity: None + # clusterIP: "" + # internalTrafficPolicy: Cluster + # externalTrafficPolicy: Cluster + # trafficDistribution: PreferClose + # annotations: {} + +# -- Nessie Ingress settings. +# These settings generate an Ingress resource that routes external traffic to the Nessie service. +# Consider enabling sticky sessions based on the remote client's IP address; +# this is generally beneficial to Nessie deployments, but some testing may be +# required in order to make sure that the load is distributed evenly among the pods. +# Check your ingress controller's documentation. +ingress: + # -- Specifies whether an ingress should be created. + enabled: false + # -- Specifies the ingressClassName; leave empty if you don't want to customize it. + className: "" + # -- Annotations to add to the ingress. + annotations: { + # nginx.ingress.kubernetes.io/upstream-hash-by: "$binary_remote_addr" + } + # -- Specifies the path type of host paths. Valid values are: "Prefix", "Exact" or "ImplementationSpecific". + pathType: ImplementationSpecific + # -- A list of host paths used to configure the ingress. + hosts: + - host: chart-example.local + paths: [] + # -- The service target for the ingress. + service: + # -- The port name to route traffic to. Must match one of the ports in service.ports or in + # extraServices.ports. Optional; if not provided, the first port in service.ports will be used. + portName: nessie-http + # -- The target service name suffix. Optional; if not provided, the main service will be + # targeted. Change this only if you are targeting a service defined in extraServices. + nameSuffix: "" + # -- A list of TLS certificates; each entry has a list of hosts in the certificate, + # along with the secret name used to terminate TLS traffic on port 443. 
+ tls: [] +# - hosts: +# - chart-example1.local +# - chart-example2.local +# secretName: secret1 + +# -- Override the strategy for nessie deployment. +# Valid values for type are: RollingUpdate and Recreate. +# If you are using the ROCKSDB version store type then you should use Recreate. +# Max Surge will allow new pods to be created before old ones are culled. Do not enable this when using ROCKSDB +# version store type. +# Max Unavailable will allow old pods to be culled before replacements are created +# See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + {} + # type: RollingUpdate + # rollingUpdate: + # maxUnavailable: 0 + # maxSurge: 1 + +# -- Configures the resources requests and limits for nessie pods. +# We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. +resources: + {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + # -- Specifies whether automatic horizontal scaling should be enabled. + # Do not enable this when using ROCKSDB version store type. + enabled: false + # -- The minimum number of replicas to maintain. + minReplicas: 1 + # -- The maximum number of replicas to maintain. + maxReplicas: 3 + # -- Optional; set to zero or empty to disable. + targetCPUUtilizationPercentage: 80 + # -- Optional; set to zero or empty to disable. + targetMemoryUtilizationPercentage: + +# -- Node labels which must match for the nessie pod to be scheduled on that node. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector. +nodeSelector: + {} + # kubernetes.io/os: linux + +# -- A list of tolerations to apply to nessie pods. 
See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/. +tolerations: [] +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Exists" +# effect: "NoSchedule" + +# -- Affinity and anti-affinity for nessie pods. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity. +affinity: {} +# podAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# podAffinityTerm: +# topologyKey: kubernetes.io/hostname +# labelSelector: +# matchExpressions: +# - key: app.kubernetes.io/name +# operator: In +# values: +# - nessie + +# -- Configures the liveness probe for nessie pods. +livenessProbe: + # -- Number of seconds after the container has started before liveness probes are initiated. Minimum value is 0. + initialDelaySeconds: 5 + # -- How often (in seconds) to perform the probe. Minimum value is 1. + periodSeconds: 10 + # -- Minimum consecutive successes for the probe to be considered successful after having failed. Minimum value is 1. + successThreshold: 1 + # -- Minimum consecutive failures for the probe to be considered failed after having succeeded. Minimum value is 1. + failureThreshold: 3 + # -- Number of seconds after which the probe times out. Minimum value is 1. + timeoutSeconds: 10 + # -- Optional duration in seconds the pod needs to terminate gracefully upon probe failure. Minimum value is 1. + terminationGracePeriodSeconds: 30 + +# -- Configures the readiness probe for nessie pods. +readinessProbe: + # -- Number of seconds after the container has started before readiness probes are initiated. Minimum value is 0. + initialDelaySeconds: 5 + # -- How often (in seconds) to perform the probe. Minimum value is 1. + periodSeconds: 10 + # -- Minimum consecutive successes for the probe to be considered successful after having failed. Minimum value is 1. 
+ successThreshold: 1 + # -- Minimum consecutive failures for the probe to be considered failed after having succeeded. Minimum value is 1. + failureThreshold: 3 + # -- Number of seconds after which the probe times out. Minimum value is 1. + timeoutSeconds: 10 + +# -- Extra volumes to add to the nessie pod. See https://kubernetes.io/docs/concepts/storage/volumes/. +extraVolumes: [] + # - name: extra-volume + # emptyDir: {} + +# -- Extra volume mounts to add to the nessie container. See https://kubernetes.io/docs/concepts/storage/volumes/. +extraVolumeMounts: [] + # - name: extra-volume + # mountPath: /usr/share/extra-volume + +# -- Add additional init containers to the nessie pod(s) See https://kubernetes.io/docs/concepts/workloads/pods/init-containers/. +extraInitContainers: [] + # - name: your-image-name + # image: your-image + # imagePullPolicy: Always + # command: ['sh', '-c', 'echo "hello world"'] diff --git a/addons/nessie/0.103/meta.yaml b/addons/nessie/0.103/meta.yaml new file mode 100644 index 00000000..61e0f497 --- /dev/null +++ b/addons/nessie/0.103/meta.yaml @@ -0,0 +1,30 @@ +name: nessie +version: "0.103.0" +id: fd1cbeda-00a5-11f0-8a54-dbe6761671cc +description: "nessie." 
+displayName: "nessie" +metadata: + displayName: "nessie" + provider: + name: drycc + supportURL: https://nessie.org/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/nessie +tags: nessie +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "service.type" + required: false + description: "config nessie service type" +- name: "jdbc" + required: true + description: "config nessie persist store" +- name: "catalog" + required: false + description: "config nessie catalog" +- name: "extraEnv" + required: false + description: "config nessie env" +archive: false diff --git a/addons/nessie/0.103/plans/standard-4c4g/bind.yaml b/addons/nessie/0.103/plans/standard-4c4g/bind.yaml new file mode 100644 index 00000000..a3905b88 --- /dev/null +++ b/addons/nessie/0.103/plans/standard-4c4g/bind.yaml @@ -0,0 +1,19 @@ +credential: +{{- if eq .Values.service.type "LoadBalancer" }} +- name: EXTERNAL_IP + valueFrom: + serviceRef: + name: {{ include "nessie.fullname" . }} + jsonpath: '{.status.loadBalancer.ingress[0].ip}' +{{- end }} +- name: CLUSTER_IP + valueFrom: + serviceRef: + name: {{ include "nessie.fullname" . }} + jsonpath: '{.spec.clusterIP}' +- name: PORT + valueFrom: + serviceRef: + name: {{ include "nessie.fullname" . 
}} + jsonpath: '{.spec.ports[?(@.name=="nessie-http")].port}' + diff --git a/addons/nessie/0.103/plans/standard-4c4g/meta.yaml b/addons/nessie/0.103/plans/standard-4c4g/meta.yaml new file mode 100644 index 00000000..5c43e6ca --- /dev/null +++ b/addons/nessie/0.103/plans/standard-4c4g/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c4g" +id: dcaa216a-00a5-11f0-b8cd-9bd9f5fdeeed +description: "nessie plan standard-4c4g which limit 4c4g" +displayName: "4c4g" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/nessie/0.103/plans/standard-4c4g/values.yaml b/addons/nessie/0.103/plans/standard-4c4g/values.yaml new file mode 100644 index 00000000..65b92df4 --- /dev/null +++ b/addons/nessie/0.103/plans/standard-4c4g/values.yaml @@ -0,0 +1,11 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-nessie-standard-4c4g + +resources: + limits: + cpu: 4000m + memory: 4096Mi + requests: + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/nessie/0.103/plans/standard-8c8g/bind.yaml b/addons/nessie/0.103/plans/standard-8c8g/bind.yaml new file mode 100644 index 00000000..a3905b88 --- /dev/null +++ b/addons/nessie/0.103/plans/standard-8c8g/bind.yaml @@ -0,0 +1,19 @@ +credential: +{{- if eq .Values.service.type "LoadBalancer" }} +- name: EXTERNAL_IP + valueFrom: + serviceRef: + name: {{ include "nessie.fullname" . }} + jsonpath: '{.status.loadBalancer.ingress[0].ip}' +{{- end }} +- name: CLUSTER_IP + valueFrom: + serviceRef: + name: {{ include "nessie.fullname" . }} + jsonpath: '{.spec.clusterIP}' +- name: PORT + valueFrom: + serviceRef: + name: {{ include "nessie.fullname" . 
}} + jsonpath: '{.spec.ports[?(@.name=="nessie-http")].port}' + diff --git a/addons/nessie/0.103/plans/standard-8c8g/meta.yaml b/addons/nessie/0.103/plans/standard-8c8g/meta.yaml new file mode 100644 index 00000000..17ad8c26 --- /dev/null +++ b/addons/nessie/0.103/plans/standard-8c8g/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c8g" +id: c6ebd538-00b1-11f0-b921-d7a8e3333e3c +description: "nessie plan standard-8c8g which limit 8c8g" +displayName: "8c8g" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/nessie/0.103/plans/standard-8c8g/values.yaml b/addons/nessie/0.103/plans/standard-8c8g/values.yaml new file mode 100644 index 00000000..899f4068 --- /dev/null +++ b/addons/nessie/0.103/plans/standard-8c8g/values.yaml @@ -0,0 +1,11 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-nessie-standard-8c8g + +resources: + limits: + cpu: 8000m + memory: 8192Mi + requests: + cpu: 1000m + memory: 1024Mi \ No newline at end of file diff --git a/addons/postgresql-cluster/15/chart/postgresql-cluster/Chart.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/Chart.yaml index da518fb1..3d72445f 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/Chart.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/Chart.yaml @@ -8,8 +8,8 @@ dependencies: version: ~1.1.1 description: PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. 
engine: gotpl -home: https://github.com/bitnami/charts/tree/master/bitnami/postgresql -icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png +home: https://github.com/drycc/charts/tree/master/drycc/postgresql +icon: https://drycc.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png keywords: - postgresql - postgres diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/Chart.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/Chart.yaml new file mode 100644 index 00000000..3f8db7b6 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: "16" +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.1 +description: PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. +engine: gotpl +home: https://github.com/drycc/charts/tree/master/drycc/postgresql +icon: https://drycc.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png +keywords: + - postgresql + - postgres + - database + - sql + - replication + - cluster + - patroni +maintainers: + - email: zhang.eamon@hotmail.com + name: zhangeamon +name: postgresql +sources: + - https://github.com/drycc-addons/ + - https://www.postgresql.org/ +version: "16.4" diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/README.md b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/README.md new file mode 100644 index 00000000..c407603f --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/README.md @@ -0,0 +1,166 @@ + +# Postgresql cluster addons +## Plans + +View and choose the service resource specifications you need. 
+``` + # drycc resources:plans postgresql-cluster +``` +### Resource specification list +| Resource Specification | Cores | MEMORY | Storage SIZE | +| :---: | :---: | :---: | :---: | +| standard-10 | 1C | 2G | 10G | +| standard-20 | 2C | 4G | 20G | +| standard-50 | 2C | 8G | 50G | +| standard-100 | 4C | 16G | 100G | +| standard-200 | 8C | 32G | 200G | +| standard-400 | 16C | 64G | 400G | +| standard-800 | 32C | 128G | 800G | + +In order to obtain a better experience, it is recommended not to exceed 80% usage of resource utilization for a long period of time. If there is a need for larger resource scale, please apply for private customization. + +## Create Postgresql Cluster Service instance + +- Create Postgresql service +``` +# drycc resources:create postgresql-cluster:standard-10 `my_pg_001` +``` +- View service status +``` +# drycc resources:describe `my_pg_001` +``` +- Bind service +``` +# drycc resources:bind `my_pg_001` +``` +- View resource status +``` +# drycc resources:describe `my_pg_001` +``` + +## Create Service with values file + +`vim values.yaml` +``` +# create or update pg instance template yaml +networkPolicy.allowNamespaces: + - mx-test1 +service.type: ClusterIP +metrics.enabled: true +backup: + # whether BackUP should be enabled + enabled: true + # Cron schedule for doing base backups + scheduleCronJob: "20 0 * * 0" + Amount of base backups to retain + retainBackups: 2 + s3: + awsAccessKeyID: "" + awsSecretAccessKey: "" + walGS3Prefix: "s3://xx" + awsEndpoint: "http://xxxx:9000" + awsS3ForcePathStyle: "true" + awsRegion: dx-1 +``` +``` + drycc resources:create postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml +``` + +## Update Service +### Create app user and database + +- Login database web with admin user & password + +- Change administrator initial password +``` +ALTER USER administrator WITH ENCRYPTED PASSWORD 'newpassword'; +``` +- View total connections number in postgresql cluster; +``` +show max_connections ; +``` +- CREATE APP 
USER
+```
+CREATE USER `myuser` WITH CONNECTION LIMIT `conn limit` LOGIN ENCRYPTED PASSWORD 'password';
+GRANT `myuser` TO administrator;
+```
+- CREATE APP DATABASE
+```
+CREATE DATABASE `mydb` OWNER `myuser`;
+
+```
+- CREATE EXTENSIONS
+```
+CREATE EXTENSION pg_buffercache;
+```
+
+### Network Access
+
+Default access allow policy: only namespace scope.
+
+- allow `mx-test1` namespace access
+
+`vim values.yaml`
+```
+networkPolicy.allowNamespaces:
+  - mx-test1
+```
+```
+drycc resources:update postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml
+```
+
+- Assign external network IP address
+
+`vim values.yaml`
+```
+service.type: LoadBalancer
+```
+```
+drycc resources:update postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml
+```
+- View resource status
+```
+# drycc resources:describe `my_pg_001`
+```
+
+### Manage backups of your data `Very important`
+
+`Strongly recommend enabling this feature.`
+`Strongly recommend enabling this feature.`
+`Strongly recommend enabling this feature.`
+
+PG data backup uses S3 as the backend store. Choose an independent storage space `outside of the current environment` as your backup space.
+
+`vim values.yaml`
+```
+backup:
+  # whether backup should be enabled
+  enabled: true
+  # Cron schedule for doing base backups
+  scheduleCronJob: "20 0 * * 0"
+  # Amount of base backups to retain
+  retainBackups: 2
+  s3:
+    awsAccessKeyID: DO9l771LqiwZkhhz
+    awsSecretAccessKey: R3Dv0NEmJBo8JFdn1q8jz49ArWwpDjFn
+    walGS3Prefix: mx-test
+```
+```
+drycc resources:update postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml
+```
+
+You can modify multiple settings at once; there is no need to change only part of them each time. 
+
+
+## Destroy Service
+
+- Unbind service first
+```
+# drycc resources:unbind `my_pg_001`
+```
+- Destroy service
+```
+# drycc resources:destroy `my_pg_001`
+```
+# 修改pg在容器中分配的动态共享内存不足的问题 (fix insufficient dynamic shared memory allocated to PG in containers)
+https://www.cnblogs.com/daniel-hutao/p/17903993.html
\ No newline at end of file
diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/NOTES.txt b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/NOTES.txt
new file mode 100644
index 00000000..22a4f2d2
--- /dev/null
+++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/NOTES.txt
@@ -0,0 +1,25 @@
+Patroni can be accessed via port 5432 on the following DNS name from within your cluster:
+{{ template "patroni.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+To get your password for superuser run:
+
+    # superuser password
+    PGPASSWORD_SUPERUSER=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "patroni.fullname" . }} -o jsonpath="{.data.password-superuser}" | base64 --decode)
+
+    # admin password
+    PGPASSWORD_ADMIN=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "patroni.fullname" . }} -o jsonpath="{.data.password-admin}" | base64 --decode)
+
+To connect to your database:
+
+1. Run a postgres pod and connect using the psql cli:
+    # login as superuser
+    kubectl run -i --tty --rm psql --image=postgres \
+        --env "PGPASSWORD=$PGPASSWORD_SUPERUSER" \
+        --command -- psql -U postgres \
+        -h {{ template "patroni.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local postgres
+
+    # login as admin
+    kubectl run -i --tty --rm psql --image=postgres \
+        --env "PGPASSWORD=$PGPASSWORD_ADMIN" \
+        --command -- psql -U admin \
+        -h {{ template "patroni.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local postgres diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/_helpers.tpl b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/_helpers.tpl new file mode 100644 index 00000000..d5876632 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/_helpers.tpl @@ -0,0 +1,219 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "patroni.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "patroni.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "patroni.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use. +*/}} +{{- define "patroni.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "patroni.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a cronjob object should be created for Postgresql HA patroni ## TODO feature +*/}} +{{- define "patroni.createCronJob" -}} +{{- if and .Values.backup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a cronjob object should be created for Postgresql HA patroni ## TODO feature +*/}} +{{- define "patroni.createLogicalBackupCronJob" -}} +{{- if and .Values.logicalbackup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for Postgresql HA patroni +*/}} +{{- define "patroni.createConfigmap" -}} +{{- if and .Values.preInitScript }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Create patroni envs. +*/}} +{{- define "patroni.envs" }} +{{- if .Values.kubernetes.configmaps.enable }} +- name: KUBERNETES_USE_CONFIGMAPS + value: "true" +{{- end }} +{{- if .Values.kubernetes.endpoints.enable }} +- name: PATRONI_KUBERNETES_USE_ENDPOINTS + value: 'true' +{{- end }} +- name: PATRONI_KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP +- name: PATRONI_KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +- name: PATRONI_KUBERNETES_BYPASS_API_SERVICE + value: 'true' +- name: PATRONI_KUBERNETES_LABELS + value: '{application: {{ template "patroni.fullname" . }},release: {{ .Release.Name }},cluster-name: {{ template "patroni.fullname" . }}}' +- name: PATRONI_SUPERUSER_USERNAME + value: postgres +- name: PATRONI_SUPERUSER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser +- name: PATRONI_REPLICATION_USERNAME + value: standby +- name: PATRONI_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . 
}} + key: password-replication +- name: PATRONI_REWIND_USERNAME + value: rewinder +- name: PATRONI_REWIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-rewind +- name: ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: admin-user +- name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: admin-password +- name: PATRONI_SCOPE + value: {{ template "patroni.fullname" . }} +- name: PATRONI_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name +- name: PATRONI_POSTGRESQL_DATA_DIR + value: "{{ .Values.persistentVolume.mountPath }}/data" +- name: PGDATA + value: "{{ .Values.persistentVolume.mountPath }}/data" +- name: PATRONI_POSTGRESQL_PGPASS + value: /tmp/pgpass +- name: PATRONI_POSTGRESQL_LISTEN + value: '0.0.0.0:5432' +- name: PATRONI_RESTAPI_LISTEN + value: '0.0.0.0:8008' +{{- end -}} + +{{/* +Return true if a configmap object should be created for PG backup. +*/}} +{{- define "backup.createConfigmap" -}} +{{- if and .Values.backup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Generate random password +*/}} + +{{/* +Get the super user password ; +*/}} +{{- define "credentials.superuserValue" }} +{{- if .Values.credentials.superuser }} + {{- .Values.credentials.superuser -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "password-superuser") -}} +{{- end -}} +{{- end }} + +{{/* +Get the rewind password ; +*/}} +{{- define "credentials.rewindValue" }} +{{- if .Values.credentials.rewind }} + {{- .Values.credentials.rewind -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) 
"Length" 10 "Key" "password-rewind") -}} +{{- end -}} +{{- end }} + +{{/* +Get the replication password ; +*/}} +{{- define "credentials.replicationValue" }} +{{- if .Values.credentials.replication }} + {{- .Values.credentials.replication -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "password-replication") -}} +{{- end -}} +{{- end }} + +{{/* +Get the administrator password ; +*/}} +{{- define "adminRole.passwordValue" }} +{{- if .Values.adminRole.password }} + {{- .Values.adminRole.password -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "admin-password") -}} +{{- end -}} +{{- end }} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} +{{- $len := (default 16 .Length) | int -}} +{{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} +{{- if $obj }} +{{- index $obj .Key | b64dec -}} +{{- else -}} +{{- randAlphaNum $len -}} +{{- end -}} +{{- end }} + diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-backup.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-backup.yaml new file mode 100644 index 00000000..fdc62197 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-backup.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-backup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + backup.env: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.backupEnv "context" $ ) | nindent 4 }} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-logicalbackup .yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-logicalbackup .yaml new file mode 100644 index 00000000..8de61100 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-logicalbackup .yaml @@ -0,0 +1,19 @@ +{{- if (include "patroni.createLogicalBackupCronJob" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-logicalbackup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + logicalbackup.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.logicalbackupScript "context" $ ) | nindent 4 }} + +{{- end }} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-patroni.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-patroni.yaml new file mode 100644 index 00000000..ad4b5849 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-patroni.yaml @@ -0,0 +1,20 @@ +{{- if (include "patroni.createConfigmap" .) 
}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-patroni + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + pre_init.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.preInitScript "context" $ ) | nindent 4 }} + post_init.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.postInitScript "context" $ ) | nindent 4 }} +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-postgresql.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-postgresql.yaml new file mode 100644 index 00000000..8aba698a --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cm-postgresql.yaml @@ -0,0 +1,18 @@ +{{- if (include "patroni.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-postgresql + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom_conf.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.postgresql.config "context" $ ) | nindent 4 }} +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cronjob.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cronjob.yaml new file mode 100644 index 00000000..495dfa7b --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/cronjob.yaml @@ -0,0 +1,43 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- if (include "patroni.createCronJob" .) }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ include "patroni.fullname" . }}-backup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + schedule: "{{ .Values.backup.scheduleCronJob }}" + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: {{ .Chart.Name }}-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /usr/bin/env + - bash + - -c + - | + curl "http://${REPLHOST}:9000/pg_backup" + env: + - name: REPLHOST + value: {{ include "patroni.fullname" . }}-repl +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/logicalbackup-cronjob.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/logicalbackup-cronjob.yaml new file mode 100644 index 00000000..071b9bd9 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/logicalbackup-cronjob.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- if (include "patroni.createLogicalBackupCronJob" .) }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ include "patroni.fullname" . }}-logicalbackup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + schedule: "{{ .Values.logicalbackup.scheduleCronJob }}" + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: {{ .Chart.Name }}-logicalbackup + image: "{{ .Values.logicalbackupImages.repository }}:{{ .Values.logicalbackupImages.tag }}" + imagePullPolicy: {{ .Values.logicalbackupImages.pullPolicy | quote }} + command: + - /usr/bin/env + - bash + - -c + - | + sh /opt/drycc/logicalbackup/logicalbackup.sh + env: + - name: PGHOST + value: {{ include "patroni.fullname" . }}-repl + - name: PGPORT + value: "5432" + - name: PGUSER + value: postgres + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser + - name: MINIO_BUCKET + value: {{ .Values.logicalbackup.minio.bucket }} + - name: MINIO_HOST + value: {{ .Values.logicalbackup.minio.endpoint }} + - name: MINIO_ACCESS_KEY + value: {{ .Values.logicalbackup.minio.access_key }} + - name: MINIO_SECRET_KEY + value: {{ .Values.logicalbackup.minio.secret_key }} + + volumeMounts: + - mountPath: "/opt/drycc/logicalbackup/" + name: logicalbackup-config + + volumes: + - name: logicalbackup-config + configMap: + name: {{ template "common.names.fullname" . 
}}-logicalbackup +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/networkpolicy.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/networkpolicy.yaml new file mode 100644 index 00000000..19ff2288 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/networkpolicy.yaml @@ -0,0 +1,54 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: patroni + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if eq .Values.service.type "ClusterIP" }} + ingress: + # Allow inbound connections + - ports: + - port: 5432 + - port: 9000 + - port: 80 + - port: 8008 + {{- if and .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{ end }} + {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: backup + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + ingress: + - {} + {{- end }} +{{- end }} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/role.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/role.yaml new file mode 100644 index 00000000..8dec5309 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/role.yaml @@ -0,0 +1,49 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: + - create + - get + - list + - patch + - update + - watch + # delete is required only for 'patronictl remove' + - delete +- apiGroups: [""] + resources: ["services"] + verbs: + - create +- apiGroups: [""] + resources: ["endpoints"] + verbs: + - create + - get + - patch + - update + # the following three privileges are necessary only when using endpoints + - list + - watch + # delete is required only for for 'patronictl remove' + - delete + - deletecollection +- apiGroups: [""] + resources: ["pods"] + verbs: + - get + - list + - patch + - update + - watch +{{- end }} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/rolebinding.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/rolebinding.yaml new file mode 100644 index 00000000..5e15948f --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: {{ template "patroni.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "patroni.fullname" . 
}} +{{- end }} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/sec.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/sec.yaml new file mode 100644 index 00000000..c2e13055 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/sec.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} +type: Opaque +data: + password-superuser: {{ include "credentials.superuserValue" . | b64enc | quote }} + password-rewind: {{ include "credentials.rewindValue" . | b64enc | quote }} + password-replication: {{ include "credentials.replicationValue" . | b64enc | quote }} + admin-user: {{ .Values.adminRole.username | b64enc | quote }} + admin-password: {{ include "adminRole.passwordValue" . | b64enc | quote }} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/serviceaccount.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/serviceaccount.yaml new file mode 100644 index 00000000..e1b2ebf6 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "patroni.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end }} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml new file mode 100644 index 00000000..d826952c --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml @@ -0,0 +1,273 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} +spec: + serviceName: {{ template "patroni.fullname" . }} + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} + template: + metadata: + name: {{ template "patroni.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} + spec: + {{- if .Values.patroni.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.patroni.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.patroni.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.patroni.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.patroni.nodeAffinityPreset.type "key" .Values.patroni.nodeAffinityPreset.key "values" .Values.patroni.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.patroni.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.patroni.nodeSelector "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "patroni.serviceAccountName" . }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + env: + {{- include "patroni.envs" . 
| indent 8 }} + {{- if .Values.env }} + {{- range $key, $val := .Values.env }} + - name: {{ $key | quote | upper }} + value: {{ $val | quote }} + {{- end }} + {{- end }} + readinessProbe: + httpGet: + scheme: HTTP + path: /readiness + port: 8008 + initialDelaySeconds: 3 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /liveness + port: 8008 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /usr/bin/env + - bash + - -c + - | + # switch leader pod if the current pod is the leader + if curl --fail http://localhost:8008/read-write; then + init-stack patronictl switchover --force + fi + ports: + - containerPort: 8008 + protocol: TCP + - containerPort: 5432 + protocol: TCP + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + - mountPath: "/opt/drycc/postgresql/scripts/" + name: patroni-config + - mountPath: "/opt/drycc/postgresql/config/" + name: postgresql-config + # readOnly: true + - mountPath: "/opt/drycc/postgresql/backup/" + name: backup-config + - name: dshm + mountPath: /dev/shm + # readOnly: true + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: "{{ .Values.metrics.image.repository }}:{{ .Values.metrics.image.tag }}" + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if 
.Values.metrics.customMetrics }} + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + env: + - name: DATA_SOURCE_NAME + value: {{ printf "postgresql://tea_mon:password@127.0.0.1:5432/postgres?sslmode=disable" }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPort }} + startupProbe: + initialDelaySeconds: 10 + tcpSocket: + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + - name: {{ .Chart.Name }}-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + command: + - /usr/bin/env + - bash + - -c + - | + python3 /opt/drycc/postgresql/pgbackup.py 0.0.0.0 9000 + env: + - name: PGHOST + value: localhost + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser + - name: PGUSER + value: postgres + - name: PGDATABASE + value: postgres + - name: PGPORT + value: "5432" + - name: PGDATA + value: "{{ .Values.persistentVolume.mountPath }}/data" + ports: + - containerPort: 9000 + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + - mountPath: "/opt/drycc/postgresql/backup/" + name: backup-config + + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ .Values.affinity | toYaml | indent 8 }} + {{- else if .Values.affinityTemplate }} + affinity: +{{ tpl .Values.affinityTemplate . | indent 8 }} + {{- end }} + volumes: + - name: patroni-config + configMap: + name: {{ template "common.names.fullname" . }}-patroni + - name: postgresql-config + configMap: + name: {{ template "common.names.fullname" . }}-postgresql + - name: backup-config + configMap: + name: {{ template "common.names.fullname" . }}-backup + {{- if not .Values.persistentVolume.enabled }} + - name: storage-volume + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + + {{- if .Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + {{- if .Values.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + annotations: + {{- if .Values.persistentVolume.annotations }} +{{ toYaml .Values.persistentVolume.annotations | indent 8 }} + {{- end }} + labels: + application: {{ template "patroni.fullname" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: + accessModes: +{{ toYaml .Values.persistentVolume.accessModes | indent 8 }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-config.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-config.yaml new file mode 100644 index 00000000..5f7b0f60 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-config.yaml @@ -0,0 +1,11 @@ +# headless service to avoid deletion of patronidemo-config endpoint +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-config + labels: + application: {{ template "patroni.fullname" . }} + release: {{ .Release.Name }} + cluster-name: {{ template "patroni.fullname" . }} +spec: + clusterIP: None diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-master.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-master.yaml new file mode 100644 index 00000000..609ed5ba --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-master.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-master + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} + role: primary +spec: + type: {{ .Values.service.type }} + selector: + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . }} + role: primary + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + protocol: TCP \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-metrics.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-metrics.yaml new file mode 100644 index 00000000..862c6a0c --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-metrics.yaml @@ -0,0 +1,32 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "patroni.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + # component label must appear only once; "metrics" identifies this Service + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: patroni +{{- end }} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-relp.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-relp.yaml new file mode 100644 index 00000000..252882b3 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc-relp.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-repl + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} + role: replica +spec: + type: {{ .Values.service.type }} + selector: + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . }} + role: replica + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + - name: pgbackup + port: 9000 + targetPort: 9000 \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc.yaml new file mode 100644 index 00000000..ac0c2c44 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} +spec: + type: ClusterIP + ports: + - port: 5432 + targetPort: 5432 + protocol: TCP \ No newline at end of file diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml new file mode 100644 index 00000000..9ab405f3 --- /dev/null +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml @@ -0,0 +1,441 @@ +replicaCount: 3 +diagnosticMode: + enabled: false + +service: + type: ClusterIP + +image: + # Image was built from registry.drycc.cc/drycc-addons/patroni:3.2 + # https://github.com/zalando/spilo/tree/master/postgres-appliance + repository: registry.drycc.cc/drycc-addons/postgresql-patroni + tag: 16 + # IfNotPresent , Always + pullPolicy: 'IfNotPresent' + +logicalbackupImages: + repository: registry.drycc.cc/drycc-addons/postgresql-logicalbackup + tag: 16 + # IfNotPresent , Always + pullPolicy: 'IfNotPresent' + +# Credentials used by Patroni , passwd +# https://github.com/zalando/patroni/blob/master/docs/SETTINGS.rst#postgresql +# https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst +credentials: + superuser: "" + rewind: "" + replication: "" + +adminRole: + username: administrator + password: "" + +# Distribution Configuration stores +# Please note that only one of the following stores should be enabled. +kubernetes: + endpoints: + enable: true + configmaps: + enable: false + +# Extra custom environment variables.
+env: {} + +# +#custom patroni.yaml used by patroni boot +# configuration: {} +preInitScript: | + mkdir -p /home/postgres/pgdata/log + ln -sf /dev/stdout "/home/postgres/pgdata/log/postgresql.csv" + cat > /opt/drycc/postgresql/patroni.yml <<__EOF__ + log: + level: INFO + restapi: + listen: 0.0.0.0:8008 + connect_address: 0.0.0.0:8008 + bootstrap: + dcs: + ttl: 30 + loop_wait: 10 + retry_timeout: 10 + maximum_lag_on_failover: 1048576 + failsafe_mode: true + postgresql: + use_pg_rewind: true + use_slots: true + pg_hba: + - local all all peer + - host all tea_mon 127.0.0.1/32 trust + - host all all 0.0.0.0/0 scram-sha-256 + - host replication ${PATRONI_REPLICATION_USERNAME} 0.0.0.0/0 scram-sha-256 + - host replication postgres 0.0.0.0/0 scram-sha-256 + custom_conf: '/opt/drycc/postgresql/config/custom_conf.conf' + parameters: + max_connections: {{ .Values.patroni.pgParameters.max_connections }} + max_worker_processes: {{ .Values.patroni.pgParameters.max_worker_processes }} + max_parallel_workers: {{ .Values.patroni.pgParameters.max_parallel_workers }} + wal_level: logical + hot_standby: "on" + max_wal_senders: 10 + max_replication_slots: 10 + hot_standby_feedback: on + max_prepared_transactions: 0 + max_locks_per_transaction: 64 + wal_log_hints: "on" + wal_keep_size: "1 GB" + max_slot_wal_keep_size: {{ .Values.patroni.pgParameters.max_slot_wal_keep_size | quote }} + track_commit_timestamp: "off" + archive_mode: "on" + archive_timeout: 300s + archive_command: sh /opt/drycc/postgresql/walbackup.sh %p + # timescaledb.license: 'timescale' + shared_preload_libraries: 'auto_explain,pg_stat_statements' + log_destination: 'csvlog' + log_filename: postgresql.log + logging_collector: on + log_directory: /home/postgres/pgdata/log + log_min_messages: 'info' + log_min_duration_statement: 1000 + log_lock_waits: on + log_statement: 'ddl' + initdb: + - auth-host: scram-sha-256 + - auth-local: trust + - encoding: UTF8 + - locale: en_US.UTF-8 + - data-checksums + post_bootstrap: sh 
/opt/drycc/postgresql/scripts/post_init.sh + restapi: + connect_address: '${PATRONI_KUBERNETES_POD_IP}:8008' + postgresql: + connect_address: '${PATRONI_KUBERNETES_POD_IP}:5432' + authentication: + superuser: + username: postgres + password: '${PATRONI_SUPERUSER_PASSWORD}' + replication: + username: standby + password: '${PATRONI_REPLICATION_PASSWORD}' + rewind: # Has no effect on postgres 10 and lower + username: rewinder + password: '${PATRONI_REWIND_PASSWORD}' + watchdog: + mode: off + __EOF__ + +postInitScript: | + #!/bin/bash + set -Eeu + # Create monitor user + psql -w -c "CREATE USER tea_mon ;GRANT pg_monitor TO tea_mon ;create extension pg_stat_statements;create extension pg_buffercache ;" + # Create admin user + if [[( -n "$ADMIN_USER") && ( -n "$ADMIN_PASSWORD")]]; then + + echo "Creating user ${ADMIN_USER}" + psql -w -c "CREATE USER ${ADMIN_USER} WITH SUPERUSER CREATEDB CREATEROLE CONNECTION LIMIT 10 LOGIN ENCRYPTED PASSWORD '${ADMIN_PASSWORD}'" + + else + echo "Skipping create admin user" + fi + psql -w -c "CHECKPOINT;CHECKPOINT;" + +backupEnv: | + #!/bin/bash + export USE_WALG={{ .Values.backup.enabled | quote }} + export BACKUP_NUM_TO_RETAIN={{ .Values.backup.retainBackups | quote}} + export WALG_BACKUP_THRESHOLD_MEGABYTES={{ .Values.backup.backupThresholdMegabytes | quote }} + export WALE_BACKUP_THRESHOLD_PERCENTAGE={{ .Values.backup.backupThresholdPercentage | quote }} + export AWS_ACCESS_KEY_ID={{ .Values.backup.s3.awsAccessKeyID | quote }} + export AWS_SECRET_ACCESS_KEY={{ .Values.backup.s3.awsSecretAccessKey | quote }} + export WALG_S3_PREFIX={{ .Values.backup.s3.walGS3Prefix | quote }} + export AWS_ENDPOINT={{ .Values.backup.s3.awsEndpoint | quote }} + export AWS_S3_FORCE_PATH_STYLE={{ .Values.backup.s3.awsS3ForcePathStyle | quote }} + export AWS_REGION={{ .Values.backup.s3.awsRegion | quote }} + +logicalbackupScript: | + #!/bin/bash + + # PostgreSQL 设置 + # POSTGRES_USER="postgres" + # POSTGRES_HOST="127.0.0.1" + + # MinIO 设置 + # 
MINIO_BUCKET="pgbackup" + # MINIO_HOST="http://localhost:9000" + # MINIO_ACCESS_KEY="admin123" + # MINIO_SECRET_KEY="admin123" + + # 设置 MinIO 客户端别名 + mc alias set myminio $MINIO_HOST $MINIO_ACCESS_KEY $MINIO_SECRET_KEY + + # 创建以当前日期和时间命名的备份目录 + BACKUP_DIR="$(date +%Y%m%d%H%M)" + MINIO_PATH="myminio/$MINIO_BUCKET/$BACKUP_DIR" + + # 备份全局对象 + echo "Backing up global objects to $MINIO_PATH/roles_globals.sql.gz" + pg_dumpall -g -U "$POSTGRES_USER" -h "$POSTGRES_HOST" | pigz | mc pipe "$MINIO_PATH/roles_globals.sql.gz" + + # 获取所有非模板数据库的列表 + DATABASES=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") + + # 为每个数据库执行备份 + for DB in $DATABASES; do + echo "Backing up $DB to $MINIO_PATH/$DB.sql.gz" + pg_dump -U "$POSTGRES_USER" -h "$POSTGRES_HOST" "$DB" | pigz | mc pipe "$MINIO_PATH/$DB.sql.gz" + done + + echo "Backup process completed!" + + +postgresql: + config: |- + log_min_duration_statement = 1000 + max_wal_size = 4GB + min_wal_size = 4GB + max_wal_senders = 10 + max_replication_slots = 10 + max_prepared_transactions = 0 + max_locks_per_transaction = 64 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '2 GB' + + + ## @param patroni.podAnnotations Additional pod annotations for Postgresql patroni pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param patroni.podAffinityPreset Postgresql patroni pod affinity preset. Ignored if `patroni.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param patroni.podAntiAffinityPreset Postgresql patroni pod anti-affinity preset. Ignored if `patroni.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Postgresql Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param patroni.nodeAffinityPreset.type Postgresql patroni node affinity preset type. Ignored if `patroni.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param patroni.nodeAffinityPreset.key Postgresql patroni node label key to match Ignored if `patroni.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param patroni.nodeAffinityPreset.values Postgresql patroni node label values to match. Ignored if `patroni.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param patroni.affinity Affinity for Postgresql patroni pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param patroni.nodeSelector Node labels for Postgresql patroni pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + +## Postgresql Prometheus exporter parameters +## +metrics: + enabled: true + image: + repository: registry.drycc.cc/drycc-addons/postgres-exporter + tag: "0" + # IfNotPresent , Always + pullPolicy: 'IfNotPresent' + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database:.... 
+ ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + service: + ports: + metrics: 9187 + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + + customMetrics: {} + containerPort: 9187 + containerSecurityContext: + enabled: false + runAsUser: 1001 + runAsNonRoot: true + customLivenessProbe: {} + customReadinessProbe: + enabled: true + resources: + limits: + cpu: 100m + memory: 512Mi + hugepages-2Mi: 20Mi + requests: + cpu: 100m + memory: 512Mi + +logicalbackup: + enabled: false + scheduleCronJob: "22 0 * * 0" + minio: + used: true + buckect: "s3://xx" + access_key: "" + secret_key: "" + endpoint: "http://xxxx:9000" + awsS3ForcePathStyle: "true" + awsRegion: dx-1 + +backup: + # Specifies whether Wal-G should be enabled + enabled: false + # Cron schedule for doing base backups + scheduleCronJob: "22 0 * * 0" + # Amount of base backups to retain + retainBackups: 2 + # Name of the secret that holds the credentials to the bucket + kubernetesSecret: + # Maximum size of the WAL segments accumulated after the base backup to + # consider WAL-G restore instead of pg_basebackup + backupThresholdMegabytes: 1024 + # Maximum ratio (in percents) of the accumulated WAL files to 
the base backup + # to consider WAL-G restore instead of pg_basebackup + backupThresholdPercentage: 30 + s3: + used: true + awsAccessKeyID: "" + awsSecretAccessKey: "" + walGS3Prefix: "s3://xx" + awsEndpoint: "http://xxxx:9000" + awsS3ForcePathStyle: "true" + awsRegion: dx-1 + +logicalBackup: + enabled: false + +## persistentVolumeClaimRetentionPolicy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention +## @param persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet +## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced +## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted +persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete +persistentVolume: + enabled: true + size: 10G + ## database data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + subPath: "" + mountPath: "/home/postgres/pgdata" + annotations: {} + accessModes: + - ReadWriteOnce + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 100m + memory: 512Mi + # hugepages-2Mi: 4Mi + requests: + cpu: 100m + memory: 512Mi + +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "1Gi" + +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinityTemplate: | + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + application: {{ template "patroni.name" . }} + release: {{ .Release.Name | quote }} +affinity: {} + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: +## Postgresql Nework Policy configuration +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param networkPolicy.allowExternal The Policy model to apply. + ## When set to false, only pods with the correct + ## client label will have network access to the port Postgresql is listening + ## on. When true, Postgresql will accept connections from any source + ## (with the correct destination port). 
+ ## + allowCurrentNamespace: true + allowNamespaces: +clusterDomain: cluster.local \ No newline at end of file diff --git a/addons/postgresql-cluster/16/meta.yaml b/addons/postgresql-cluster/16/meta.yaml new file mode 100644 index 00000000..54a98604 --- /dev/null +++ b/addons/postgresql-cluster/16/meta.yaml @@ -0,0 +1,30 @@ +name: postgresql-cluster-16 +version: 16 +id: 344e4f5f-8a46-4a0a-9f42-57ce2708da44 +description: "postgresql-cluster-16" +displayName: "postgresql-cluster-16" +metadata: + displayName: "postgresql-cluster-16" + provider: + name: drycc + supportURL: https://www.postgresql.org/ + documentationURL: https://github.com/drycc-addons/drycc-docker-postgresql-cluster +tags: postgresql-cluster +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" +- name: "service.type" + required: false + description: "service type config for values.yaml" +- name: "backup" + required: false + description: "Whether to use S3 for backup your data. default false . ps: Make sure there is a available S3 " +- name: "logicalbackup" + required: false + description: "Whether to use S3 for logical backup your data. default false . ps: Make sure there is a available S3 " +archive: false \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml b/addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: "5432" + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-16c64g400/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-16c64g400/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/meta.yaml b/addons/postgresql-cluster/16/plans/standard-16c64g400/meta.yaml new file mode 100644 index 00000000..3c60cb3a --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-16c64g400/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c64g400" +id: a8f2ec87-7b64-4ad5-ae21-b8070a8cbf4e +description: "PostgreSQL Cluster standard-16c64g400 plan: Disk 400Gi ,vCPUs 16 , RAM 64G , DB MAX Connection 2000" +displayName: "standard-16c64g400" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml new file mode 100644 index 00000000..8fe5e689 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml @@ -0,0 +1,81 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-400 +patroni: + pgParameters: + max_worker_processes: 32 + max_parallel_workers: 16 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings 
+ shared_buffers = '16384 MB' + work_mem = '32 MB' + maintenance_work_mem = '520 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '45 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '16 GB' + min_wal_size = '2 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 8 + max_parallel_maintenance_workers = 8 + max_parallel_workers = 16 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 40GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 16000m + memory: 64Gi + hugepages-2Mi: 40Mi + requests: + cpu: 8000m + memory: 32Gi + +persistentVolume: + enabled: true + size: 400Gi + +shmVolume: + sizeLimit: "32Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml b/addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: "5432" + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-2c4g20/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c4g20/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/meta.yaml b/addons/postgresql-cluster/16/plans/standard-2c4g20/meta.yaml new file mode 100644 index 00000000..10e34581 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c4g20/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g20" +id: 15a1f1b3-2af5-4d6a-acf3-4251ef305d66 +description: "PostgreSQL Cluster standard-2c4g20 plan: Disk 20Gi ,vCPUs 2 , RAM 4G , DB MAX Connection 1000" +displayName: "standard-2c4g20" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml new file mode 100644 index 00000000..27ea5c42 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml @@ -0,0 +1,81 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-20 + +patroni: + pgParameters: + max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 1000 + max_slot_wal_keep_size: '2 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '1024 MB' + work_mem 
= '32 MB' + maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '3 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '15 min' + checkpoint_completion_target = 0.9 + max_wal_size = '2 GB' + min_wal_size = '1 GB' + + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 1 + max_parallel_maintenance_workers = 1 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 2000m + memory: 4Gi + hugepages-2Mi: 20Mi + requests: + cpu: 2000m + memory: 4Gi + +persistentVolume: + enabled: true + size: 20Gi + +shmVolume: + sizeLimit: "2Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml b/addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-2c8g50/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c8g50/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/meta.yaml b/addons/postgresql-cluster/16/plans/standard-2c8g50/meta.yaml new file mode 100644 index 00000000..d19e4757 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c8g50/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c8g50" +id: 1cb94804-c93d-4500-a736-09f467230754 +description: "PostgreSQL Cluster standard-2c8g50 plan: Disk 50Gi ,vCPUs 2 , RAM 8G , DB MAX Connection 2000" +displayName: "standard-2c8g50" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml new file mode 100644 index 00000000..1062740f --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml @@ -0,0 +1,83 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-50 + +patroni: + pgParameters: + max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 2000 + max_slot_wal_keep_size: '5 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '2048 MB' + work_mem 
= '32 MB' + maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '6 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '15 min' + checkpoint_completion_target = 0.9 + max_wal_size = '4 GB' + min_wal_size = '1 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 1 + max_parallel_maintenance_workers = 1 + max_parallel_workers = 2 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 5GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 2000m + memory: 8Gi + hugepages-2Mi: 20Mi + requests: + cpu: 2000m + memory: 8Gi + +persistentVolume: + enabled: true + size: 50Gi + +shmVolume: + sizeLimit: "4Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml b/addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-32c128g800/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c128g800/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/meta.yaml b/addons/postgresql-cluster/16/plans/standard-32c128g800/meta.yaml new file mode 100644 index 00000000..e6588767 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c128g800/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c128g800" +id: e2a98bfc-da70-4db9-b58d-57a0fb869ad9 +description: "PostgreSQL Cluster standard-32c128g800 plan: Disk 800Gi ,vCPUs 32 , RAM 128G , DB MAX Connection 2000" +displayName: "standard-32c128g800" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml new file mode 100644 index 00000000..a2f1bb41 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-800 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '100 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # 
Memory Settings + shared_buffers = '32768 MB' + work_mem = '64 MB' + maintenance_work_mem = '720 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '90 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '64 GB' + min_wal_size = '4 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 16 + max_parallel_maintenance_workers = 16 + max_parallel_workers = 32 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 80GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 32000m + memory: 128Gi + hugepages-2Mi: 80Mi + requests: + cpu: 16000m + memory: 64Gi + +persistentVolume: + enabled: true + size: 800Gi + +shmVolume: + sizeLimit: "64Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml b/addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-32c64g4000/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c64g4000/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/meta.yaml b/addons/postgresql-cluster/16/plans/standard-32c64g4000/meta.yaml new file mode 100644 index 00000000..1da62950 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c64g4000/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c64g4000" +id: 60f4fb02-fb4d-4e42-acbe-194e056535a6 +description: "PostgreSQL Cluster standard-32c64g4000 plan: Disk 4Ti ,vCPUs 32 , RAM 64G , DB MAX Connection 2000" +displayName: "standard-32c64g4000" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml new file mode 100644 index 00000000..cdc3c494 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-4000 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '200 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # 
Memory Settings + shared_buffers = '16384 MB' + work_mem = '256 MB' + maintenance_work_mem = '2048 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '45 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '32 GB' + min_wal_size = '4 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 16 + max_parallel_maintenance_workers = 16 + max_parallel_workers = 32 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 100GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 32000m + memory: 64Gi + hugepages-2Mi: 80Mi + requests: + cpu: 16000m + memory: 32Gi + +persistentVolume: + enabled: true + size: 4Ti + +shmVolume: + sizeLimit: "32Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml b/addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-4c16g100/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-4c16g100/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/meta.yaml b/addons/postgresql-cluster/16/plans/standard-4c16g100/meta.yaml new file mode 100644 index 00000000..405ee9fa --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-4c16g100/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c16g100" +id: 87f0fa69-67ba-4d1d-b0e7-cd84e5d57a0f +description: "PostgreSQL Cluster standard-4c16g100 plan: Disk 100Gi ,vCPUs 4 , RAM 16G , DB MAX Connection 2000" +displayName: "standard-4c16g100" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml new file mode 100644 index 00000000..3b5d6b45 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml @@ -0,0 +1,83 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-100 + +patroni: + pgParameters: + max_worker_processes: 8 + max_parallel_workers: 4 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + 
shared_buffers = '4096 MB' + work_mem = '32 MB' + maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '11 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '25 min' + checkpoint_completion_target = 0.9 + max_wal_size = '8 GB' + min_wal_size = '2 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 2 + max_parallel_maintenance_workers = 2 + max_parallel_workers = 4 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 10GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 4000m + memory: 16Gi + hugepages-2Mi: 50Mi + requests: + cpu: 4000m + memory: 16Gi + +persistentVolume: + enabled: true + size: 100Gi + +shmVolume: + sizeLimit: "8Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml b/addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-8c32g200/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-8c32g200/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/meta.yaml b/addons/postgresql-cluster/16/plans/standard-8c32g200/meta.yaml new file mode 100644 index 00000000..916b2924 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-8c32g200/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g200" +id: 44b98aeb-6774-42d2-80a3-c7bd2bf6155d +description: "PostgreSQL Cluster standard-8c32g200 plan: Disk 200Gi ,vCPUs 8 , RAM 32G , DB MAX Connection 2000" +displayName: "standard-8c32g200" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml new file mode 100644 index 00000000..e2484df5 --- /dev/null +++ b/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-200 + +patroni: + pgParameters: + max_worker_processes: 16 + max_parallel_workers: 8 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + 
shared_buffers = '8192 MB' + work_mem = '32 MB' + maintenance_work_mem = '420 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '22 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '25 min' + checkpoint_completion_target = 0.9 + max_wal_size = '16 GB' + min_wal_size = '3 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 4 + max_parallel_maintenance_workers = 4 + max_parallel_workers = 8 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 20GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 8000m + memory: 32Gi + hugepages-2Mi: 60Mi + requests: + cpu: 4000m + memory: 16Gi + +persistentVolume: + enabled: true + size: 200Gi + +shmVolume: + sizeLimit: "16Gi" \ No newline at end of file From 687852b667d7838a9ffcc6273a315e978d49f445 Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 31 Mar 2025 16:29:39 +0800 Subject: [PATCH 44/93] chore(addons): add lakefs --- addons/grafana/10/README.md | 34 +-- addons/grafana/10/chart/grafana/Chart.yaml | 2 +- addons/grafana/10/chart/grafana/values.yaml | 18 +- addons/index.yaml | 5 +- addons/lakefs/1.52/chart/lakefs/Chart.yaml | 17 ++ .../1.52/chart/lakefs/templates/NOTES.txt | 20 ++ .../1.52/chart/lakefs/templates/_env.tpl | 111 ++++++++++ .../1.52/chart/lakefs/templates/_fluffy.tpl | 209 ++++++++++++++++++ .../lakefs/templates/_gcp_proxy_container.tpl | 18 ++ .../1.52/chart/lakefs/templates/_helpers.tpl | 82 +++++++ .../lakefs/templates/_proxy_container.tpl | 30 +++ .../templates/additional-resources.yaml | 7 + .../chart/lakefs/templates/configmap.yaml | 9 + .../chart/lakefs/templates/deployment.yaml | 122 ++++++++++ .../lakefs/templates/dev-postgresql.yaml | 46 ++++ .../lakefs/templates/fluffy-configmap.yaml | 11 + .../lakefs/templates/fluffy-deployment.yaml | 119 ++++++++++ .../chart/lakefs/templates/fluffy-secret.yaml | 47 ++++ .../lakefs/templates/fluffy-service.yaml | 35 +++ .../1.52/chart/lakefs/templates/ingress.yaml | 80 +++++++ .../1.52/chart/lakefs/templates/secret.yaml | 16 ++ .../1.52/chart/lakefs/templates/service.yaml | 20 ++ addons/lakefs/1.52/chart/lakefs/values.yaml | 127 +++++++++++ addons/lakefs/1.52/meta.yaml | 30 +++ .../lakefs/1.52/plans/standard-1c1g/bind.yaml | 17 ++ .../standard-1c1g/create-instance-schema.json | 12 + .../lakefs/1.52/plans/standard-1c1g/meta.yaml | 6 + .../1.52/plans/standard-1c1g/values.yaml | 28 +++ .../lakefs/1.52/plans/standard-4c4g/bind.yaml | 17 ++ .../standard-4c4g/create-instance-schema.json | 12 + .../lakefs/1.52/plans/standard-4c4g/meta.yaml | 6 + 
.../1.52/plans/standard-4c4g/values.yaml | 11 + .../lakefs/1.52/plans/standard-4c8g/bind.yaml | 17 ++ .../lakefs/1.52/plans/standard-4c8g/meta.yaml | 6 + .../1.52/plans/standard-4c8g/values.yaml | 12 + .../1.52/plans/standard-8c16g/bind.yaml | 17 ++ .../1.52/plans/standard-8c16g/meta.yaml | 6 + .../1.52/plans/standard-8c16g/values.yaml | 12 + .../8.0/chart/mysql-cluster/README.md | 18 +- .../8.0/chart/mysql-cluster/values.yaml | 2 +- 40 files changed, 1376 insertions(+), 38 deletions(-) create mode 100644 addons/lakefs/1.52/chart/lakefs/Chart.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/NOTES.txt create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/_env.tpl create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/_fluffy.tpl create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/_gcp_proxy_container.tpl create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/_helpers.tpl create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/_proxy_container.tpl create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/additional-resources.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/configmap.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/deployment.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/dev-postgresql.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/fluffy-configmap.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/fluffy-deployment.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/fluffy-secret.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/fluffy-service.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/ingress.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/secret.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/templates/service.yaml create mode 100644 addons/lakefs/1.52/chart/lakefs/values.yaml create mode 100644 addons/lakefs/1.52/meta.yaml create 
mode 100644 addons/lakefs/1.52/plans/standard-1c1g/bind.yaml create mode 100644 addons/lakefs/1.52/plans/standard-1c1g/create-instance-schema.json create mode 100644 addons/lakefs/1.52/plans/standard-1c1g/meta.yaml create mode 100644 addons/lakefs/1.52/plans/standard-1c1g/values.yaml create mode 100644 addons/lakefs/1.52/plans/standard-4c4g/bind.yaml create mode 100644 addons/lakefs/1.52/plans/standard-4c4g/create-instance-schema.json create mode 100644 addons/lakefs/1.52/plans/standard-4c4g/meta.yaml create mode 100644 addons/lakefs/1.52/plans/standard-4c4g/values.yaml create mode 100644 addons/lakefs/1.52/plans/standard-4c8g/bind.yaml create mode 100644 addons/lakefs/1.52/plans/standard-4c8g/meta.yaml create mode 100644 addons/lakefs/1.52/plans/standard-4c8g/values.yaml create mode 100644 addons/lakefs/1.52/plans/standard-8c16g/bind.yaml create mode 100644 addons/lakefs/1.52/plans/standard-8c16g/meta.yaml create mode 100644 addons/lakefs/1.52/plans/standard-8c16g/values.yaml diff --git a/addons/grafana/10/README.md b/addons/grafana/10/README.md index 3b1bd86a..eb3ba0bc 100644 --- a/addons/grafana/10/README.md +++ b/addons/grafana/10/README.md @@ -1,12 +1,12 @@ -# Grafana packaged by Bitnami +# Grafana packaged by Drycc Grafana is an open source metric analytics and visualization suite for visualizing time series data that supports various types of data sources. [Overview of Grafana](https://grafana.com/) -Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. +Trademarks: This software listing is packaged by Drycc. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. 
## TL;DR @@ -19,7 +19,7 @@ $ helm install my-release my-repo/grafana This chart bootstraps a [grafana](https://github.com/drycc/containers/tree/main/drycc/grafana) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. -Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. +Drycc charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. ## Prerequisites @@ -51,9 +51,9 @@ $ helm delete my-release The command removes all the Kubernetes components associated with the chart and deletes the release. Use the option `--purge` to delete all persistent volumes too. -## Differences between the Bitnami Grafana chart and the Bitnami Grafana Operator chart +## Differences between the Drycc Grafana chart and the Drycc Grafana Operator chart -In the Bitnami catalog we offer both the drycc/grafana and drycc/grafana-operator charts. Each solution covers different needs and use cases. +In the Drycc catalog we offer both the drycc/grafana and drycc/grafana-operator charts. Each solution covers different needs and use cases. The *drycc/grafana* chart deploys a single Grafana installation (with grafana-image-renderer) using a Kubernetes Deployment object (together with Services, PVCs, ConfigMaps, etc.). 
The figure below shows the deployed objects in the cluster after executing *helm install*: @@ -441,12 +441,12 @@ This solution allows to easily deploy multiple Grafana instances compared to the | Name | Description | Value | | ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------- | ----------------------- | | `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | -| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` | -| `volumePermissions.image.repository` | Bitnami Shell image repository | `drycc/drycc-shell` | -| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r38` | -| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` | +| `volumePermissions.image.registry` | Drycc Shell image registry | `docker.io` | +| `volumePermissions.image.repository` | Drycc Shell image repository | `drycc/drycc-shell` | +| `volumePermissions.image.tag` | Drycc Shell image tag (immutable tags are recommended) | `11-debian-11-r38` | +| `volumePermissions.image.digest` | Drycc Shell image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Drycc Shell image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Drycc Shell image pull secrets | `[]` | | `volumePermissions.resources.limits` | The resources limits for the init container | `{}` | | `volumePermissions.resources.requests` | The requested resources for the init container | `{}` | | `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` | @@ -486,7 +486,7 @@ $ helm install my-release -f values.yaml my-repo/grafana It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. +Drycc will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. ### Using custom configuration @@ -614,14 +614,14 @@ As an alternative, you can use of the preset configurations for pod affinity, po ## Persistence -The [Bitnami Grafana](https://github.com/drycc/containers/tree/main/drycc/grafana) image stores the Grafana data and configurations at the `/opt/drycc/grafana/data` path of the container. +The [Drycc Grafana](https://github.com/drycc/containers/tree/main/drycc/grafana) image stores the Grafana data and configurations at the `/opt/drycc/grafana/data` path of the container. Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence. 
## Troubleshooting -Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.drycc.com/general/how-to/troubleshoot-helm-chart-issues). +Find more information about how to deal with common errors related to Drycc's Helm charts in [this troubleshooting guide](https://docs.drycc.com/general/how-to/troubleshoot-helm-chart-issues). ## Upgrading @@ -631,7 +631,7 @@ This major release only bumps the Grafana version to 9.x. No major issues are ex ### To 7.0.0 -This major release renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Bitnami charts repository. +This major release renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Drycc charts repository. Since the volume access mode when persistence is enabled is `ReadWriteOnce` in order to upgrade the deployment you will need to either use the `Recreate` strategy or delete the old deployment. @@ -651,7 +651,7 @@ This version also introduces `drycc/common`, a [library chart](https://helm.sh/d **What changes were introduced in this major version?** - Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. -- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Drycc Helm Charts **Considerations when upgrading to this version** @@ -677,7 +677,7 @@ This major version signifies this change. 
## License -Copyright © 2022 Bitnami +Copyright © 2022 Drycc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/addons/grafana/10/chart/grafana/Chart.yaml b/addons/grafana/10/chart/grafana/Chart.yaml index 250b38d0..cde08be8 100644 --- a/addons/grafana/10/chart/grafana/Chart.yaml +++ b/addons/grafana/10/chart/grafana/Chart.yaml @@ -18,7 +18,7 @@ keywords: - metrics - logs maintainers: - - name: Bitnami + - name: Drycc url: https://github.com/drycc/charts name: grafana sources: diff --git a/addons/grafana/10/chart/grafana/values.yaml b/addons/grafana/10/chart/grafana/values.yaml index 9b029425..753732fa 100644 --- a/addons/grafana/10/chart/grafana/values.yaml +++ b/addons/grafana/10/chart/grafana/values.yaml @@ -42,7 +42,7 @@ commonAnnotations: {} ## @section Grafana parameters -## Bitnami Grafana image version +## Drycc Grafana image version ## ref: https://hub.docker.com/r/drycc/grafana/tags/ ## @param image.registry Grafana image registry ## @param image.repository Grafana image repository @@ -816,7 +816,7 @@ imageRenderer: ## @param imageRenderer.enabled Enable using a remote rendering service to render PNG images ## enabled: false - ## Bitnami Grafana Image Renderer image + ## Drycc Grafana Image Renderer image ## ref: https://hub.docker.com/r/drycc/grafana-image-renderer/tags/ ## @param imageRenderer.image.registry Grafana Image Renderer image registry ## @param imageRenderer.image.repository Grafana Image Renderer image repository @@ -1155,14 +1155,14 @@ volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` ## enabled: false - ## Bitnami Shell image + ## Drycc Shell image ## ref: https://hub.docker.com/r/drycc/drycc-shell/tags/ - ## @param volumePermissions.image.registry Bitnami Shell image registry - ## @param volumePermissions.image.repository Bitnami Shell image 
repository - ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy - ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets + ## @param volumePermissions.image.registry Drycc Shell image registry + ## @param volumePermissions.image.repository Drycc Shell image repository + ## @param volumePermissions.image.tag Drycc Shell image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Drycc Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Drycc Shell image pull policy + ## @param volumePermissions.image.pullSecrets Drycc Shell image pull secrets ## image: registry: docker.io diff --git a/addons/index.yaml b/addons/index.yaml index 128d481b..8aec83b4 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -74,4 +74,7 @@ entries: description: "YugabyteDB is a high-performance distributed SQL database for powering global, internet-scale applications. " nessie: - version: "0.103" - description: "Transactional Catalog for Data Lakes with Git-like semantics . " \ No newline at end of file + description: "Transactional Catalog for Data Lakes with Git-like semantics . " + lakefs: + - version: "1.52" + description: "LakeFS provides version control over the data lake, and uses Git-like semantics to create and access those versions. If you know git, you’ll be right at home with lakeFS. 
" \ No newline at end of file diff --git a/addons/lakefs/1.52/chart/lakefs/Chart.yaml b/addons/lakefs/1.52/chart/lakefs/Chart.yaml new file mode 100644 index 00000000..d6d7d678 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v2 +appVersion: 1.52.0 +description: A Helm chart for running LakeFS on Kubernetes +home: https://lakefs.io +icon: https://lakefs.io/wp-content/uploads/2020/07/lake-fs-color-2.svg +maintainers: +- email: services@treeverse.io + name: treeverse +name: lakefs +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.1 +sources: +- https://github.com/treeverse/lakeFS +type: application +version: 1.4.5 diff --git a/addons/lakefs/1.52/chart/lakefs/templates/NOTES.txt b/addons/lakefs/1.52/chart/lakefs/templates/NOTES.txt new file mode 100644 index 00000000..46996d04 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/NOTES.txt @@ -0,0 +1,20 @@ +Thank you for installing lakeFS! + +1. Run the following to get a url to start setting up lakeFS: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "lakefs.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/setup +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "lakefs.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "lakefs.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }}/setup +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl wait --for=condition=ready pod $POD_NAME + echo "Visit http://127.0.0.1:{{ .Values.deployment.port }}/setup to use your application" + kubectl port-forward $POD_NAME {{ .Values.deployment.port }}:{{ .Values.deployment.port }} --namespace {{ .Release.Namespace }} +{{- end }} + +2. See the docs on how to create your first repository: https://docs.lakefs.io/quickstart/repository.html diff --git a/addons/lakefs/1.52/chart/lakefs/templates/_env.tpl b/addons/lakefs/1.52/chart/lakefs/templates/_env.tpl new file mode 100644 index 00000000..c003e433 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/_env.tpl @@ -0,0 +1,111 @@ +{{- define "lakefs.env" -}} +env: + {{- if and .Values.existingSecret .Values.secretKeys.databaseConnectionString }} + - name: LAKEFS_DATABASE_POSTGRES_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: {{ .Values.existingSecret }} + key: {{ .Values.secretKeys.databaseConnectionString }} + {{- else if and .Values.secrets (.Values.secrets).databaseConnectionString }} + - name: LAKEFS_DATABASE_POSTGRES_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: {{ include "lakefs.fullname" . }} + key: database_connection_string + {{- end }} + {{- if .Values.existingSecret }} + - name: LAKEFS_AUTH_ENCRYPT_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.existingSecret }} + key: {{ .Values.secretKeys.authEncryptSecretKey }} + {{- else if and .Values.secrets (.Values.secrets).authEncryptSecretKey }} + - name: LAKEFS_AUTH_ENCRYPT_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "lakefs.fullname" . 
}} + key: auth_encrypt_secret_key + {{- else }} + - name: LAKEFS_AUTH_ENCRYPT_SECRET_KEY + value: 0d48e811f0b11d7f18d8c905 + {{- end }} + {{- if (.Values.fluffy).enabled }} + - name: LAKEFS_USAGE_REPORT_ENABLED + value: "true" + {{- if (.Values.fluffy.sso).enabled }} + - name: LAKEFS_AUTH_AUTHENTICATION_API_ENDPOINT + value: {{ printf "http://%s/api/v1" (include "fluffy.ssoServiceName" .) | quote }} + {{- if and .Values.ingress.enabled (.Values.fluffy.sso.saml).enabled }} + - name: LAKEFS_AUTH_COOKIE_AUTH_VERIFICATION_AUTH_SOURCE + value: saml + - name: LAKEFS_AUTH_UI_CONFIG_LOGIN_URL + value: {{ printf "%s/sso/login-saml" .Values.fluffy.sso.saml.lakeFSServiceProviderIngress }} + - name: LAKEFS_AUTH_UI_CONFIG_LOGOUT_URL + value: {{ printf "%s/sso/logout-saml" .Values.fluffy.sso.saml.lakeFSServiceProviderIngress }} + {{- end }} + {{- if (.Values.fluffy.sso.oidc).enabled }} + - name: LAKEFS_AUTH_UI_CONFIG_LOGIN_URL + value: '/oidc/login' + - name: LAKEFS_AUTH_UI_CONFIG_LOGOUT_URL + value: '/oidc/logout' + {{- end }} + {{- if (.Values.fluffy.sso.ldap).enabled }} + - name: LAKEFS_AUTH_REMOTE_AUTHENTICATOR_ENDPOINT + value: {{ default (printf "http://%s/api/v1/ldap/login" (include "fluffy.ssoServiceName" .) | quote) (.Values.fluffy.sso.ldap).endpointOverride }} + - name: LAKEFS_AUTH_UI_CONFIG_LOGOUT_URL + value: /logout + {{- end }} + {{- end }} + {{- if (.Values.fluffy.rbac).enabled }} + - name: LAKEFS_AUTH_API_ENDPOINT + value: {{ printf "http://%s/api/v1" (include "fluffy.rbacServiceName" .) 
| quote }} + - name: LAKEFS_AUTH_UI_CONFIG_RBAC + value: internal + {{- end }} + {{- end }} + {{- if .Values.s3Fallback.enabled }} + - name: LAKEFS_GATEWAYS_S3_FALLBACK_URL + value: http://localhost:7001 + {{- end }} + {{- if .Values.committedLocalCacheVolume }} + - name: LAKEFS_COMMITTED_LOCAL_CACHE_DIR + value: /lakefs/cache + {{- end }} + {{- if .Values.useDevPostgres }} + {{- if and (.Values.fluffy).enabled (.Values.fluffy.rbac).enabled }} + - name: LAKEFS_DATABASE_TYPE + value: postgres + - name: LAKEFS_DATABASE_POSTGRES_CONNECTION_STRING + value: 'postgres://lakefs:lakefs@postgres-server:5432/postgres?sslmode=disable' + {{- end }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- toYaml .Values.extraEnvVars | nindent 2 }} + {{- end }} +{{- if .Values.extraEnvVarsSecret }} +envFrom: + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} +{{- end }} +{{- end }} + +{{- define "lakefs.volumes" -}} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes }} +{{- end }} +{{- if .Values.committedLocalCacheVolume }} +- name: committed-local-cache +{{- toYaml .Values.committedLocalCacheVolume | nindent 2 }} +{{- end }} +{{- if not .Values.lakefsConfig }} +- name: {{ .Chart.Name }}-local-data +{{- end}} +{{- if .Values.lakefsConfig }} +- name: config-volume + configMap: + name: {{ include "lakefs.fullname" . }} + items: + - key: config.yaml + path: config.yaml +{{- end }} +{{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/_fluffy.tpl b/addons/lakefs/1.52/chart/lakefs/templates/_fluffy.tpl new file mode 100644 index 00000000..29c0accb --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/_fluffy.tpl @@ -0,0 +1,209 @@ +{{/* +fluffy resource full name +*/}} +{{- define "fluffy.fullname" -}} +{{- $name := include "lakefs.fullname" . }} +{{- printf "%s-fluffy" $name | trunc 63 }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "fluffy.labels" -}} +helm.sh/chart: {{ include "lakefs.chart" . }} +{{ include "fluffy.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "fluffy.selectorLabels" -}} +app.kubernetes.io/name: {{ include "lakefs.name" . }}-fluffy +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "fluffy.serviceAccountName" -}} +{{- $lakeFSAcc := include "lakefs.serviceAccountName" . }} +{{- default $lakeFSAcc .Values.fluffy.serviceAccountName }} +{{- end }} + +{{/* +fluffy SSO service name +*/}} +{{- define "fluffy.ssoServiceName" -}} +{{- printf "fluffy-sso" }} +{{- end }} + +{{/* +fluffy Authorization service name +*/}} +{{- define "fluffy.rbacServiceName" -}} +{{- printf "fluffy-rbac" }} +{{- end }} + + +{{/* +Fluffy environment variables +*/}} + +{{- define "fluffy.env" -}} +env: + {{- if (.Values.fluffy.sso).enabled }} + {{- if and .Values.ingress.enabled (.Values.fluffy.sso.saml).enabled }} + - name: FLUFFY_AUTH_SAML_ENABLED + value: "true" + - name: FLUFFY_AUTH_LOGOUT_REDIRECT_URL + value: {{ .Values.fluffy.sso.saml.lakeFSServiceProviderIngress }} + - name: FLUFFY_AUTH_POST_LOGIN_REDIRECT_URL + value: {{ .Values.fluffy.sso.saml.lakeFSServiceProviderIngress }} + - name: FLUFFY_AUTH_SAML_SP_ROOT_URL + value: {{ .Values.fluffy.sso.saml.lakeFSServiceProviderIngress }} + - name: FLUFFY_AUTH_SAML_SP_X509_KEY_PATH + value: '/etc/saml_certs/rsa_saml_private.key' + - name: FLUFFY_AUTH_SAML_SP_X509_CERT_PATH + value: '/etc/saml_certs/rsa_saml_public.pem' + {{- end }} + {{- if (.Values.fluffy.sso.oidc).enabled }} + - name: FLUFFY_AUTH_POST_LOGIN_REDIRECT_URL + value: '/' + {{- if (.Values.fluffy.sso.oidc).client_secret }} + - name: FLUFFY_AUTH_OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "fluffy.fullname" . 
}} + key: oidc_client_secret + {{- end }} + {{- end }} + {{- if (.Values.fluffy.sso.ldap).enabled }} + - name: FLUFFY_AUTH_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "fluffy.fullname" . }} + key: ldap_bind_password + {{- end }} + {{- end }} + {{- if .Values.existingSecret }} + - name: FLUFFY_AUTH_ENCRYPT_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.existingSecret }} + key: {{ .Values.secretKeys.authEncryptSecretKey }} + {{- else if and .Values.secrets (.Values.secrets).authEncryptSecretKey }} + - name: FLUFFY_AUTH_ENCRYPT_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "lakefs.fullname" . }} + key: auth_encrypt_secret_key + {{- else }} + - name: FLUFFY_AUTH_ENCRYPT_SECRET_KEY + value: asdjfhjaskdhuioaweyuiorasdsjbaskcbkj + {{- end }} + {{- if and (.Values.fluffy.rbac).enabled }} + - name: FLUFFY_AUTH_SERVE_LISTEN_ADDRESS + value: {{ printf ":%s" (include "fluffy.rbac.containerPort" .) }} + {{- end }} + {{- if and .Values.existingSecret .Values.secretKeys.databaseConnectionString }} + - name: FLUFFY_DATABASE_POSTGRES_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: {{ .Values.existingSecret }} + key: {{ .Values.secretKeys.databaseConnectionString }} + {{- else if and .Values.secrets (.Values.secrets).databaseConnectionString }} + - name: FLUFFY_DATABASE_POSTGRES_CONNECTION_STRING + valueFrom: + secretKeyRef: + name: {{ include "lakefs.fullname" . 
}} + key: database_connection_string + {{- else if and .Values.useDevPostgres (.Values.fluffy.rbac).enabled }} + - name: FLUFFY_DATABASE_TYPE + value: postgres + - name: FLUFFY_DATABASE_POSTGRES_CONNECTION_STRING + value: 'postgres://lakefs:lakefs@postgres-server:5432/postgres?sslmode=disable' + {{- end }} + {{- if .Values.fluffy.extraEnvVars }} + {{- toYaml .Values.fluffy.extraEnvVars | nindent 2 }} + {{- end }} +{{- if .Values.fluffy.extraEnvVarsSecret }} +envFrom: + - secretRef: + name: {{ .Values.fluffy.extraEnvVarsSecret }} +{{- end }} +{{- end }} + +{{- define "fluffy.volumes" -}} +{{- if .Values.fluffy.extraVolumes }} +{{ toYaml .Values.fluffy.extraVolumes }} +{{- end }} +{{- if not .Values.fluffy.fluffyConfig }} +- name: {{ .Chart.Name }}-local-data +{{- end}} +{{- if (.Values.fluffy.sso.saml).enabled }} +- name: secret-volume + secret: + secretName: saml-certificates +{{- end }} +{{- if .Values.fluffy.fluffyConfig }} +- name: {{ include "fluffy.fullname" . }}-config + configMap: + name: {{ include "fluffy.fullname" . }}-config + items: + - key: config.yaml + path: config.yaml +{{- end }} +{{- end }} + +{{- define "fluffy.ingressOverrides" -}} +{{- $serviceName := include "fluffy.ssoServiceName" . -}} +{{- $gitVersion := .Capabilities.KubeVersion.GitVersion -}} +{{- $pathsOverrides := list "/oidc/" "/api/v1/oidc/" "/saml/" "/sso/" "/api/v1/ldap/" }} +{{- range $pathsOverrides }} +- path: {{ . 
}} +{{- if semverCompare ">=1.19-0" $gitVersion }} + pathType: Prefix + backend: + service: + name: {{ $serviceName }} + port: + number: 80 +{{- else }} + backend: + serviceName: {{ $serviceName }} + servicePort: 80 +{{- end }} +{{- end }} +{{- end }} + +{{- define "fluffy.dockerConfigJson" }} +{{- $token := .Values.fluffy.image.privateRegistry.secretToken }} +{{- $username := "externallakefs" }} +{{- $registry := "https://index.docker.io/v1/" }} +{{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"auth\":\"%s\"}}}" $registry $username $token (printf "%s:%s" $username $token | b64enc) | b64enc }} +{{- end }} + +{{- define "fluffy.sso.serviceType" }} +{{- default "ClusterIP" (.Values.fluffy.sso.service).type }} +{{- end }} +{{- define "fluffy.rbac.serviceType" }} +{{- default "ClusterIP" (.Values.fluffy.rbac.service).type }} +{{- end }} + +{{- define "fluffy.sso.port" }} +{{- default 80 (.Values.fluffy.sso.service).port }} +{{- end }} +{{- define "fluffy.rbac.port" }} +{{- default 80 (.Values.fluffy.rbac.service).port }} +{{- end }} + +{{- define "fluffy.sso.containerPort" }} +{{- default 8000 (.Values.fluffy.sso.service).containerPort }} +{{- end }} +{{- define "fluffy.rbac.containerPort" }} +{{- default 9000 (.Values.fluffy.rbac.service).containerPort }} +{{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/_gcp_proxy_container.tpl b/addons/lakefs/1.52/chart/lakefs/templates/_gcp_proxy_container.tpl new file mode 100644 index 00000000..56ac84dd --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/_gcp_proxy_container.tpl @@ -0,0 +1,18 @@ +{{- define "lakefs.gcpProxyContainer" }} +{{- if .Values.lakefsConfig }} +{{ $config := .Values.lakefsConfig | fromYaml }} +{{- end }} +{{- if .Values.gcpFallback.enabled }} +- name: gcp-proxy + image: eu.gcr.io/cloudsql-docker/gce-proxy:1.33.4 + imagePullPolicy: IfNotPresent + command: + - /cloud_sql_proxy + - -term_timeout=10s + env: +{{- if .Values.gcpFallback.instance }} + - name: 
INSTANCES + value: {{ .Values.gcpFallback.instance }} +{{- end }} +{{- end }} +{{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/_helpers.tpl b/addons/lakefs/1.52/chart/lakefs/templates/_helpers.tpl new file mode 100644 index 00000000..43c14f28 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/_helpers.tpl @@ -0,0 +1,82 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "lakefs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "lakefs.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "lakefs.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "lakefs.labels" -}} +helm.sh/chart: {{ include "lakefs.chart" . }} +{{ include "lakefs.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "lakefs.selectorLabels" -}} +app: {{ include "lakefs.name" . }} +app.kubernetes.io/name: {{ include "lakefs.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "lakefs.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "lakefs.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Define which repository to use according to the following: +1. Explicitly defined +2. Otherwise if fluffy is enabled - take enterprise image +3. Otherwise use OSS image +*/}} +{{- define "lakefs.repository" -}} +{{- if not .Values.image.repository }} +{{- if (.Values.fluffy).enabled }} +{{- default "treeverse/lakefs-enterprise" .Values.image.repository }} +{{- else }} +{{- default "treeverse/lakefs" .Values.image.repository }} +{{- end }} +{{- else }} +{{- default .Values.image.repository }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/lakefs/1.52/chart/lakefs/templates/_proxy_container.tpl b/addons/lakefs/1.52/chart/lakefs/templates/_proxy_container.tpl new file mode 100644 index 00000000..c9b8fd73 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/_proxy_container.tpl @@ -0,0 +1,30 @@ +{{- define "lakefs.s3proxyContainer" }} +{{- if .Values.lakefsConfig }} +{{ $config := .Values.lakefsConfig | fromYaml }} +{{- end }} +{{- if .Values.s3Fallback.enabled }} +- name: s3proxy + image: andrewgaul/s3proxy + ports: + - containerPort: 7001 + env: + - name: S3PROXY_AUTHORIZATION + value: none +{{- if .Values.s3Fallback.aws_access_key }} + - name: JCLOUDS_IDENTITY + value: {{ .Values.s3Fallback.aws_access_key }} + - name: JCLOUDS_CREDENTIAL + value: {{ .Values.s3Fallback.aws_secret_key }} +{{- end }} + - name: JCLOUDS_PROVIDER + value: s3 + - name: JCLOUDS_ENDPOINT + value: https://s3.amazonaws.com + - name: S3PROXY_ENDPOINT + value: "http://0.0.0.0:7001" + - name: S3PROXY_VIRTUALHOST + value: localhost + - name: LOG_LEVEL + value: {{ 
.Values.s3Fallback.log_level | default "info" }} +{{- end }} +{{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/additional-resources.yaml b/addons/lakefs/1.52/chart/lakefs/templates/additional-resources.yaml new file mode 100644 index 00000000..b2009168 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/additional-resources.yaml @@ -0,0 +1,7 @@ +{{- range .Values.extraManifests }} +--- +{{ tpl (toYaml .) $ }} +{{- end }} + + + diff --git a/addons/lakefs/1.52/chart/lakefs/templates/configmap.yaml b/addons/lakefs/1.52/chart/lakefs/templates/configmap.yaml new file mode 100644 index 00000000..2813b7b9 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lakefs.fullname" . }} +{{- with .Values.lakefsConfig }} +data: + config.yaml: + {{- toYaml . | nindent 2 }} +{{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/deployment.yaml b/addons/lakefs/1.52/chart/lakefs/templates/deployment.yaml new file mode 100644 index 00000000..42cf8427 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/deployment.yaml @@ -0,0 +1,122 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "lakefs.fullname" . }} + labels: + {{- include "lakefs.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "lakefs.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "lakefs.selectorLabels" . | nindent 8 }} + spec: + {{- if eq ( include "lakefs.repository" .) 
"treeverse/lakefs-enterprise" }} + {{- if (.Values.fluffy.image.privateRegistry).enabled }} + imagePullSecrets: + {{- if (.Values.fluffy.image.privateRegistry).secretToken }} + - name: "docker-registry" + {{- else }} + - name: {{ .Values.fluffy.image.privateRegistry.secretName }} + {{- end }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "lakefs.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + {{- if .Values.lakefsConfig }} + args: ["run", "--config", "/etc/lakefs/config.yaml"] + {{- else }} + args: ["run"] + {{- end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ include "lakefs.repository" . }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.deployment.port }} + protocol: TCP + readinessProbe: + {{- if ((.Values.readinessProbe).failureThreshold) }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if ((.Values.readinessProbe).periodSeconds) }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + {{- end }} + {{- if ((.Values.readinessProbe).successThreshold) }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + {{- end }} + {{- if ((.Values.readinessProbe).timeoutSeconds) }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + {{- end }} + httpGet: + path: /_health + port: http + livenessProbe: + {{- if ((.Values.livenessProbe).failureThreshold) }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if ((.Values.livenessProbe).periodSeconds) }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + {{- end }} + {{- if ((.Values.livenessProbe).successThreshold) }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + {{- end }} + {{- if ((.Values.livenessProbe).timeoutSeconds) }} + 
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if ((.Values.livenessProbe).initialDelaySeconds) }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + {{- end }} + httpGet: + path: /_health + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + {{- if .Values.extraVolumeMounts }} + {{ toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.committedLocalCacheVolume }} + - name: committed-local-cache + mountPath: "/lakefs/cache" + {{- end }} + {{- if .Values.lakefsConfig }} + - name: config-volume + mountPath: /etc/lakefs + {{- else }} + - name: {{ .Chart.Name }}-local-data + mountPath: "/lakefs/data" + {{- end }} + {{- include "lakefs.env" . | nindent 10 }} + {{- include "lakefs.s3proxyContainer" . | nindent 8}} + {{- include "lakefs.gcpProxyContainer" . | nindent 8}} + volumes: + {{- include "lakefs.volumes" . | nindent 8 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/dev-postgresql.yaml b/addons/lakefs/1.52/chart/lakefs/templates/dev-postgresql.yaml new file mode 100644 index 00000000..a99432cf --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/dev-postgresql.yaml @@ -0,0 +1,46 @@ +{{- if .Values.useDevPostgres }} +{{- if and (.Values.fluffy).enabled (.Values.fluffy.rbac).enabled }} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres +spec: + replicas: 1 + selector: + matchLabels: + app: postgres-lakefs + template: + metadata: + labels: + app: postgres-lakefs + spec: + containers: + - name: postgres + image: postgres:11 + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + env: + - name: POSTGRES_DB + value: postgres + - name: POSTGRES_USER + value: lakefs + - name: POSTGRES_PASSWORD + value: lakefs +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres-server + labels: + app: postgres-lakefs +spec: + type: ClusterIP + ports: + - port: 5432 + selector: + app: postgres-lakefs + +{{- end}} +{{- end}} \ No newline at end of file diff --git a/addons/lakefs/1.52/chart/lakefs/templates/fluffy-configmap.yaml b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-configmap.yaml new file mode 100644 index 00000000..014c84f6 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-configmap.yaml @@ -0,0 +1,11 @@ +{{- if (.Values.fluffy).enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "fluffy.fullname" . }}-config +{{- with .Values.fluffy.fluffyConfig }} +data: + config.yaml: + {{- toYaml . 
| nindent 4 }} +{{- end }} +{{- end}} \ No newline at end of file diff --git a/addons/lakefs/1.52/chart/lakefs/templates/fluffy-deployment.yaml b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-deployment.yaml new file mode 100644 index 00000000..176ae422 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-deployment.yaml @@ -0,0 +1,119 @@ +{{- if (.Values.fluffy).enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "fluffy.fullname" . }} + labels: + {{- include "fluffy.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.fluffy.replicaCount }} + selector: + matchLabels: + {{- include "fluffy.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/fluffy-configmap.yaml") . | sha256sum }} + {{- with .Values.fluffy.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "fluffy.selectorLabels" . | nindent 8 }} + spec: + {{- if .Values.fluffy.image.privateRegistry.enabled }} + imagePullSecrets: + {{- if .Values.fluffy.image.privateRegistry.secretToken }} + - name: "docker-registry" + {{- else }} + - name: {{ .Values.fluffy.image.privateRegistry.secretName }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "fluffy.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + args: + - run + {{ range (.Values.fluffy).additionalArguments }} + - {{ . }} + {{ end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.fluffy.image.repository }}:{{ .Values.fluffy.image.tag }}" + imagePullPolicy: {{ .Values.fluffy.image.pullPolicy }} + ports: + - name: http-sso + containerPort: {{ include "fluffy.sso.containerPort" . }} + protocol: TCP + - name: http-rbac + containerPort: {{ include "fluffy.rbac.containerPort" . 
}} + protocol: TCP + readinessProbe: + {{- if ((.Values.readinessProbe).failureThreshold) }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if ((.Values.readinessProbe).periodSeconds) }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + {{- end }} + {{- if ((.Values.readinessProbe).successThreshold) }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + {{- end }} + {{- if ((.Values.readinessProbe).timeoutSeconds) }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + {{- end }} + httpGet: + path: /_health + port: http-sso + livenessProbe: + {{- if ((.Values.livenessProbe).failureThreshold) }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if ((.Values.livenessProbe).periodSeconds) }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + {{- end }} + {{- if ((.Values.livenessProbe).successThreshold) }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + {{- end }} + {{- if ((.Values.livenessProbe).timeoutSeconds) }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if ((.Values.livenessProbe).initialDelaySeconds) }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + {{- end }} + httpGet: + path: /_health + port: http-sso + resources: + {{- toYaml .Values.fluffy.resources | nindent 12 }} + volumeMounts: + {{- if .Values.fluffy.extraVolumeMounts }} + {{ toYaml .Values.fluffy.extraVolumeMounts | nindent 12 }} + {{- end }} + - name: {{ include "fluffy.fullname" . }}-config + mountPath: /etc/fluffy/ + {{- if and .Values.fluffy.sso.enabled (.Values.fluffy.sso.saml).enabled }} + - name: secret-volume + readOnly: true + mountPath: /etc/saml_certs/ + {{- end }} + {{- include "fluffy.env" . | nindent 10 }} + volumes: + {{- include "fluffy.volumes" . | nindent 8 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end}} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/fluffy-secret.yaml b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-secret.yaml new file mode 100644 index 00000000..3150f5a1 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-secret.yaml @@ -0,0 +1,47 @@ +{{- if (.Values.fluffy).enabled }} +{{- if (.Values.fluffy.secrets).create }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "fluffy.fullname" . }} + labels: + {{- include "fluffy.labels" . | nindent 4 }} +type: Opaque +data: +{{- if and .Values.fluffy.sso.enabled (.Values.fluffy.sso.oidc).client_secret }} + oidc_client_secret: {{ .Values.fluffy.sso.oidc.client_secret | b64enc }} +{{- end}} +{{- if and .Values.fluffy.sso.enabled (.Values.fluffy.sso.ldap).bind_password }} + ldap_bind_password: {{ .Values.fluffy.sso.ldap.bind_password | b64enc }} +{{- end}} +{{- end}} + +--- +# docker-registry secret for image pull + +{{- if and .Values.fluffy.image.privateRegistry.enabled .Values.fluffy.image.privateRegistry.secretToken }} +apiVersion: v1 +kind: Secret +metadata: + name: docker-registry +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "fluffy.dockerConfigJson" . 
}} +{{- end }} + +--- +# certificates to use with SAML SSO against server such as AD FS + +{{- if and (.Values.fluffy.sso).enabled (.Values.fluffy.sso.saml).enabled }} +{{- if .Values.fluffy.sso.saml.createSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: saml-certificates +data: + rsa_saml_public.pem: '{{ .Values.fluffy.sso.saml.certificate.saml_rsa_public_cert | b64enc }}' + rsa_saml_private.key: '{{ .Values.fluffy.sso.saml.certificate.saml_rsa_private_key | b64enc }}' +{{- end }} +{{- end }} + +{{- end}} \ No newline at end of file diff --git a/addons/lakefs/1.52/chart/lakefs/templates/fluffy-service.yaml b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-service.yaml new file mode 100644 index 00000000..942d4212 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/fluffy-service.yaml @@ -0,0 +1,35 @@ +{{- if (.Values.fluffy).enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "fluffy.ssoServiceName" . }} + labels: + {{- include "fluffy.labels" . | nindent 4 }} +spec: + type: {{ include "fluffy.sso.serviceType" . }} + ports: + - port: {{ include "fluffy.sso.port" . }} + targetPort: http-sso + protocol: TCP + name: http-sso + selector: + {{- include "fluffy.selectorLabels" . | nindent 4 }} +--- +{{- if .Values.fluffy.rbac.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "fluffy.rbacServiceName" . }} + labels: + {{- include "fluffy.labels" . | nindent 4 }} +spec: + type: {{ include "fluffy.rbac.serviceType" . }} + ports: + - port: {{ include "fluffy.rbac.port" . }} + targetPort: http-rbac + protocol: TCP + name: http-rbac + selector: + {{- include "fluffy.selectorLabels" . 
| nindent 4 }} +{{- end }} +{{- end}} \ No newline at end of file diff --git a/addons/lakefs/1.52/chart/lakefs/templates/ingress.yaml b/addons/lakefs/1.52/chart/lakefs/templates/ingress.yaml new file mode 100644 index 00000000..0191fb21 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/ingress.yaml @@ -0,0 +1,80 @@ +{{- if .Values.ingress.enabled -}} + {{- $root := . -}} + {{- $fullName := include "lakefs.fullname" . -}} + {{- $svcPort := .Values.service.port -}} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 + {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 + {{- else -}} +apiVersion: extensions/v1beta1 + {{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "lakefs.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + {{- with .Values.ingress.defaultBackend }} + defaultBackend: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if ($.Values.fluffy).enabled }} + {{- include "fluffy.ingressOverrides" $root | nindent 10 }} + {{- end }} + {{- if (.pathsOverrides) }} + {{- range .pathsOverrides }} + - path: {{ .path }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ .serviceName }} + port: + number: {{ .servicePort }} + {{- else }} + backend: + serviceName: {{ .serviceName }} + servicePort: {{ .servicePort }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/secret.yaml b/addons/lakefs/1.52/chart/lakefs/templates/secret.yaml new file mode 100644 index 00000000..33e18d26 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/secret.yaml @@ -0,0 +1,16 @@ +{{- if and (.Values.secrets) (not .Values.existingSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "lakefs.fullname" . }} + labels: + {{- include "lakefs.labels" . | nindent 4 }} +type: Opaque +data: +{{- if .Values.secrets.databaseConnectionString }} + database_connection_string: {{ .Values.secrets.databaseConnectionString | default "" | b64enc }} +{{- end }} +{{- if .Values.secrets.authEncryptSecretKey }} + auth_encrypt_secret_key: {{ .Values.secrets.authEncryptSecretKey | default "" | b64enc }} +{{- end }} +{{- end }} diff --git a/addons/lakefs/1.52/chart/lakefs/templates/service.yaml b/addons/lakefs/1.52/chart/lakefs/templates/service.yaml new file mode 100644 index 00000000..281239ad --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/templates/service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "lakefs.fullname" . 
}} + labels: + {{- include "lakefs.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.service.annotations | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + protocol: TCP + name: http + selector: + {{- include "lakefs.selectorLabels" . | nindent 4 }} diff --git a/addons/lakefs/1.52/chart/lakefs/values.yaml b/addons/lakefs/1.52/chart/lakefs/values.yaml new file mode 100644 index 00000000..7a9d4f38 --- /dev/null +++ b/addons/lakefs/1.52/chart/lakefs/values.yaml @@ -0,0 +1,127 @@ +# Default values for lakefs. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + pullPolicy: IfNotPresent + repository: registry.drycc.cc/drycc-addons/lakefs + tag: "1.52.0" +nameOverride: "" +fullnameOverride: "" + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + ingressClassName: "" + defaultBackend: {} + hosts: + - host: chart-example.local + paths: [] + # redirect to a different service based on path prefix for advanced use cases only + # pathsOverrides: + # - path: /some/path + # serviceName: other-example.local + # servicePort: 80 + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + +podAnnotations: {} +jobPodAnnotations: + sidecar.istio.io/inject: "false" + +deployment: + port: 8000 + +service: + annotations: {} + type: ClusterIP + port: 80 + +resources: {} + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] + +s3Fallback: + enabled: false + +gcpFallback: + enabled: false + +lakefsConfig: | + database: + type: local + blockstore: + type: local + +# yamllint disable rule:comments + +# Example: using node instance storage for caching committed data: +# For more information: https://www.vipmind.me/infra/aws/how-to-use-ephemeral-disk-from-ec2-instance-in-eks.html +#committedLocalCacheVolume: +# hostPath: +# path: /media/ephemeral0 + +# yamllint enable + +serviceAccount: {} + +# to use an existing service account, replace the serviceAccount map above with the following: +# serviceAccount: +# name: "my-serviceaccount" + +# Extra Environment Values - allows yaml definitions +extraEnvVars: +# - name: VALUE_FROM_SECRET +# valueFrom: +# secretKeyRef: +# name: secret_name +# key: secret_key +# Override K8S defaults for readinessProbe +# readinessProbe: +# failureThreshold: 10 +# periodSeconds: 5 +# successThreshold: 4 +# timeoutSeconds: 1 +# Override K8S defaults for livenessProbe +# livenessProbe: +# failureThreshold: 20 +# periodSeconds: 5 +# successThreshold: 4 +# timeoutSeconds: 1 +# initialDelaySeconds: 5 + +# Everything under fluffy stanza is for lakeFS enterprise only https://docs.lakefs.io/enterprise/ +fluffy: + enabled: false + image: + repository: treeverse/fluffy + tag: '0.9.0' + pullPolicy: IfNotPresent + +# Start local postgres pod for quick start, not for production +useDevPostgres: true + +# Name of existing secret to use +existingSecret: null + +# Keys used for existingSecret +secretKeys: + authEncryptSecretKey: auth_encrypt_secret_key + # Use the following to fetch PostgreSQL connection string from an existing secret: + databaseConnectionString: null diff --git a/addons/lakefs/1.52/meta.yaml b/addons/lakefs/1.52/meta.yaml new file mode 100644 index 00000000..ee956aa2 --- /dev/null +++ b/addons/lakefs/1.52/meta.yaml @@ -0,0 +1,30 @@ 
+name: lakefs +version: 1.52 +id: 6e34d8ba-0dd4-11f0-9f70-9b985edbdcd7 +description: "lakefs" +displayName: "lakefs" +metadata: + displayName: "lakefs" + provider: + name: drycc + supportURL: https://docs.lakefs.io + documentationURL: https://github.com/drycc-addons/ +tags: lakefs +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "service.type" + required: false + description: "service type config for values.yaml" +- name: "lakefsConfig" + required: false + description: "lakefsConfig config for values.yaml" +- name: "replicaCount" + required: false + description: "replicaCount config for values.yaml" +- name: "extraEnvVars" + required: false + description: "extraEnvVars config for values.yaml" +archive: false diff --git a/addons/lakefs/1.52/plans/standard-1c1g/bind.yaml b/addons/lakefs/1.52/plans/standard-1c1g/bind.yaml new file mode 100644 index 00000000..3247afdd --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-1c1g/bind.yaml @@ -0,0 +1,17 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: {{ printf "EXTRANET_HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: {{ printf "HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .spec.clusterIP }' + - name: SECRET_KEY + value: 0d48e811f0b11d7f18d8c905 + - name: PORT + value: 80 diff --git a/addons/lakefs/1.52/plans/standard-1c1g/create-instance-schema.json b/addons/lakefs/1.52/plans/standard-1c1g/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-1c1g/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/lakefs/1.52/plans/standard-1c1g/meta.yaml b/addons/lakefs/1.52/plans/standard-1c1g/meta.yaml new file mode 100644 index 00000000..1b8ebe49 --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-1c1g/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c1g" +id: 0074ff96-0dd2-11f0-85dd-bff4b041926a +description: "lakefs plan standard-1c1g which limit 1c1g ." 
+displayName: "1c1g" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/lakefs/1.52/plans/standard-1c1g/values.yaml b/addons/lakefs/1.52/plans/standard-1c1g/values.yaml new file mode 100644 index 00000000..2de793ad --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-1c1g/values.yaml @@ -0,0 +1,28 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-grafana-standard-1c1g + +resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 1000m + memory: 1Gi + +lakefsConfig: | + database: + type: "postgres" + + postgres: + connection_string: "xxxx" + + blockstore: + type: s3 + s3: + force_path_style: true + endpoint: xxx + discover_bucket_region: false + credentials: + access_key_id: xxx + secret_access_key: xx diff --git a/addons/lakefs/1.52/plans/standard-4c4g/bind.yaml b/addons/lakefs/1.52/plans/standard-4c4g/bind.yaml new file mode 100644 index 00000000..3247afdd --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-4c4g/bind.yaml @@ -0,0 +1,17 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: {{ printf "EXTRANET_HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: {{ printf "HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .spec.clusterIP }' + - name: SECRET_KEY + value: 0d48e811f0b11d7f18d8c905 + - name: PORT + value: 80 diff --git a/addons/lakefs/1.52/plans/standard-4c4g/create-instance-schema.json b/addons/lakefs/1.52/plans/standard-4c4g/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-4c4g/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/lakefs/1.52/plans/standard-4c4g/meta.yaml b/addons/lakefs/1.52/plans/standard-4c4g/meta.yaml new file mode 100644 index 00000000..ae6f1e7d --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-4c4g/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c4g" +id: 07243b90-0dd2-11f0-b921-97ce1e5f9446 +description: "lakefs plan standard-4c4g ." 
+displayName: "4c4g" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/lakefs/1.52/plans/standard-4c4g/values.yaml b/addons/lakefs/1.52/plans/standard-4c4g/values.yaml new file mode 100644 index 00000000..d9008fa8 --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-4c4g/values.yaml @@ -0,0 +1,11 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-grafana-standard-4c4g + +resources: + limits: + cpu: 4000m + memory: 4Gi + requests: + cpu: 1000m + memory: 1Gi diff --git a/addons/lakefs/1.52/plans/standard-4c8g/bind.yaml b/addons/lakefs/1.52/plans/standard-4c8g/bind.yaml new file mode 100644 index 00000000..3247afdd --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-4c8g/bind.yaml @@ -0,0 +1,17 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: {{ printf "EXTRANET_HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: {{ printf "HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: SECRET_KEY + value: 0d48e811f0b11d7f18d8c905 + - name: PORT + value: 80 diff --git a/addons/lakefs/1.52/plans/standard-4c8g/meta.yaml b/addons/lakefs/1.52/plans/standard-4c8g/meta.yaml new file mode 100644 index 00000000..66e2b625 --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-4c8g/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g" +id: 22327fd6-0dd3-11f0-86e5-ef33ac735b18 +description: "lakefs plan standard-4c8g ." 
+displayName: "4c8g" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/lakefs/1.52/plans/standard-4c8g/values.yaml b/addons/lakefs/1.52/plans/standard-4c8g/values.yaml new file mode 100644 index 00000000..8a954e1a --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-4c8g/values.yaml @@ -0,0 +1,12 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-grafana-standard-4c8g + + +resources: + limits: + cpu: 4000m + memory: 8Gi + requests: + cpu: 1000m + memory: 1Gi \ No newline at end of file diff --git a/addons/lakefs/1.52/plans/standard-8c16g/bind.yaml b/addons/lakefs/1.52/plans/standard-8c16g/bind.yaml new file mode 100644 index 00000000..3247afdd --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-8c16g/bind.yaml @@ -0,0 +1,17 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: {{ printf "EXTRANET_HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: {{ printf "HOST" }} + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: SECRET_KEY + value: 0d48e811f0b11d7f18d8c905 + - name: PORT + value: 80 diff --git a/addons/lakefs/1.52/plans/standard-8c16g/meta.yaml b/addons/lakefs/1.52/plans/standard-8c16g/meta.yaml new file mode 100644 index 00000000..35e4c437 --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-8c16g/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c16g" +id: 307d195c-0dd3-11f0-bfca-2f68dd3fac98 +description: "lakefs plan standard-8c16g ." 
+displayName: "8c16g" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/lakefs/1.52/plans/standard-8c16g/values.yaml b/addons/lakefs/1.52/plans/standard-8c16g/values.yaml new file mode 100644 index 00000000..001ff535 --- /dev/null +++ b/addons/lakefs/1.52/plans/standard-8c16g/values.yaml @@ -0,0 +1,12 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-grafana-standard-8c16g + + +resources: + limits: + cpu: 8000m + memory: 16Gi + requests: + cpu: 1000m + memory: 1Gi diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/README.md b/addons/mysql-cluster/8.0/chart/mysql-cluster/README.md index c44f6211..2cb1f7fd 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/README.md +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/README.md @@ -1,12 +1,12 @@ -# MySQL packaged by Bitnami +# MySQL packaged by Drycc MySQL is a fast, reliable, scalable, and easy to use open source relational database system. Designed to handle mission-critical, heavy-load production applications. [Overview of MySQL](http://www.mysql.com) -Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. +Trademarks: This software listing is packaged by Drycc. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. ## TL;DR @@ -19,7 +19,7 @@ $ helm install my-release my-repo/mysql This chart bootstraps a [MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) replication cluster deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. -Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+Drycc charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. ## Prerequisites @@ -386,7 +386,7 @@ $ helm install my-release -f values.yaml my-repo/mysql It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. +Drycc will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. ### Use a different MySQL version @@ -394,7 +394,7 @@ To modify the application version used in this chart, specify a different versio ### Customize a new MySQL instance -The [Bitnami MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) image allows you to use your custom scripts to initialize a fresh instance. Custom scripts may be specified using the `initdbScripts` parameter. Alternatively, an external ConfigMap may be created with all the initialization scripts and the ConfigMap passed to the chart via the `initdbScriptsConfigMap` parameter. Note that this will override the `initdbScripts` parameter. +The [Drycc MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) image allows you to use your custom scripts to initialize a fresh instance. Custom scripts may be specified using the `initdbScripts` parameter. Alternatively, an external ConfigMap may be created with all the initialization scripts and the ConfigMap passed to the chart via the `initdbScriptsConfigMap` parameter. Note that this will override the `initdbScripts` parameter. The allowed extensions are `.sh`, `.sql` and `.sql.gz`. 
@@ -430,7 +430,7 @@ initContainers: ## Persistence -The [Bitnami MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) image stores the MySQL data and configurations at the `/bitnami/mysql` path of the container. +The [Drycc MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) image stores the MySQL data and configurations at the `/bitnami/mysql` path of the container. The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose. @@ -459,7 +459,7 @@ As an alternative, you can use the preset configurations for pod affinity, pod a ## Troubleshooting -Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). +Find more information about how to deal with common errors related to Drycc's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). ## Upgrading @@ -473,7 +473,7 @@ $ helm upgrade my-release my-repo/mysql --set auth.rootPassword=[ROOT_PASSWORD] ### To 9.0.0 -This major release renames several values in this chart and adds missing features, in order to be aligned with the rest of the assets in the Bitnami charts repository. +This major release renames several values in this chart and adds missing features, in order to be aligned with the rest of the assets in the Drycc charts repository. Affected values: @@ -532,7 +532,7 @@ $ kubectl delete statefulset mysql-slave --cascade=false ## License -Copyright © 2022 Bitnami +Copyright © 2022 Drycc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml index 1639f6bd..16bd4e37 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml @@ -65,7 +65,7 @@ diagnosticMode: ## @section MySQL common parameters -## Bitnami MySQL image +## Drycc MySQL image ## ref: https://hub.docker.com/r/drycc/mysql/tags/ ## @param image.registry MySQL image registry ## @param image.repository MySQL image repository From f67404ebe3ba004dc93badd73756ea0319ee2bb3 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 18 Apr 2025 16:40:18 +0800 Subject: [PATCH 45/93] chore(flink): allow image config --- addons/flink/1.17/meta.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/addons/flink/1.17/meta.yaml b/addons/flink/1.17/meta.yaml index b40b6017..95f7b633 100644 --- a/addons/flink/1.17/meta.yaml +++ b/addons/flink/1.17/meta.yaml @@ -15,6 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: +- name: "image" + required: false + description: "image config for values.yaml" - name: "jobmanager.nodeSelector" required: false description: "jobmanager nodeSelector config for values.yaml" From e1b0417501f3f8a250fee8cab5897c00da8b58ec Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 13 May 2025 09:34:00 +0800 Subject: [PATCH 46/93] chore(spark): add standard-16c64g5w plans --- .../3.4/plans/standard-16c64g5w/bind.yaml | 37 ++++++++ .../create-instance-schema.json | 12 +++ .../3.4/plans/standard-16c64g5w/meta.yaml | 6 ++ .../3.4/plans/standard-16c64g5w/values.yaml | 95 +++++++++++++++++++ 4 files changed, 150 insertions(+) create mode 100644 addons/spark/3.4/plans/standard-16c64g5w/bind.yaml create mode 100644 addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-16c64g5w/meta.yaml create mode 100644 
addons/spark/3.4/plans/standard-16c64g5w/values.yaml diff --git a/addons/spark/3.4/plans/standard-16c64g5w/bind.yaml b/addons/spark/3.4/plans/standard-16c64g5w/bind.yaml new file mode 100644 index 00000000..8f3bd78f --- /dev/null +++ b/addons/spark/3.4/plans/standard-16c64g5w/bind.yaml @@ -0,0 +1,37 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: MASTER_DOMAIN + value: {{ printf "%s-master-svc" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.clusterIP }' + + - name: MASTER_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="cluster")].port }' + + {{- if .Values.security.ssl.enabled }} + - name: HTTPS_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }} + jsonpath: '{ .spec.ports[?(@.name=="https")].port }' + {{- else }} + - name: HTTP_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-master-svc" (include "common.names.fullname" .) 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} diff --git a/addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json b/addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-16c64g5w/meta.yaml b/addons/spark/3.4/plans/standard-16c64g5w/meta.yaml new file mode 100644 index 00000000..29c9778d --- /dev/null +++ b/addons/spark/3.4/plans/standard-16c64g5w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c64g5w" +id: 4fbc37f3-b36e-44d9-bf30-6bd1ddee5fde +description: "Spark standard-16c64g5w plan which limit resources 16 cores 64G memory 5 workers." 
+displayName: "standard-16c64g5w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/spark/3.4/plans/standard-16c64g5w/values.yaml b/addons/spark/3.4/plans/standard-16c64g5w/values.yaml new file mode 100644 index 00000000..3940f0de --- /dev/null +++ b/addons/spark/3.4/plans/standard-16c64g5w/values.yaml @@ -0,0 +1,95 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "hb-spark-standard-16c64g5w" + +## Spark master specific configuration +## +master: + ## @param master.daemonMemoryLimit Set the memory limit for the master daemon + ## + daemonMemoryLimit: "" + ## @param master.configOptions Use a string to set the config options for in the form "-Dx=y" + ## + configOptions: "" + ## @param master.extraEnvVars Extra environment variables to pass to the master container + ## For example: + ## extraEnvVars: + ## - name: SPARK_DAEMON_JAVA_OPTS + ## value: -Dx=y + ## + ## Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param master.resources.limits The resources limits for the container + ## @param master.resources.requests The requested resources for the container + ## + resources: + limits: + cpu: 16 + memory: 64Gi + requests: + cpu: 4 + memory: 16Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + accessModes: + - ReadWriteOnce + size: 1Gi +## @section Spark worker parameters +## + +## Spark worker specific configuration +## +worker: + ## @param worker.daemonMemoryLimit Set the memory limit for the worker daemon + ## + daemonMemoryLimit: "" + ## @param worker.memoryLimit Set the maximum memory the worker is allowed to use + ## + memoryLimit: "" + ## @param worker.coreLimit Se the maximum number of cores that the worker can use + ## + coreLimit: "" + javaOptions: "" + ## @param worker.configOptions Set extra options to configure the worker in the form `-Dx=y` + ## + configOptions: "" + ## @param worker.extraEnvVars An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: SPARK_DAEMON_JAVA_OPTS + ## value: -Dx=y + + ## @param worker.replicaCount Number of spark workers (will be the minimum number when autoscaling is enabled) + ## + replicaCount: 5 + ## Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param worker.resources.limits The resources limits for the container + ## @param worker.resources.requests The requested resources for the container + ## + resources: + limits: + cpu: 16 + memory: 64Gi + requests: + cpu: 4 + memory: 16Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + accessModes: + - ReadWriteOnce + size: 1Gi From a06c67358b36b7dc5fcfc76a6544a9a7881492a3 Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 13 May 2025 16:24:28 +0800 Subject: [PATCH 47/93] chore(addons): add mongodb info domain and replica set name --- addons/mongodb/7.0/plans/standard-16c64g400/bind.yaml | 8 ++++++++ addons/mongodb/7.0/plans/standard-1c2g10/bind.yaml | 8 ++++++++ addons/mongodb/7.0/plans/standard-2c4g20/bind.yaml | 8 ++++++++ addons/mongodb/7.0/plans/standard-2c8g50/bind.yaml | 8 ++++++++ addons/mongodb/7.0/plans/standard-32c128g800/bind.yaml | 8 ++++++++ addons/mongodb/7.0/plans/standard-4c16g100/bind.yaml | 8 ++++++++ addons/mongodb/7.0/plans/standard-8c32g200/bind.yaml | 8 ++++++++ 7 files changed, 56 insertions(+) diff --git a/addons/mongodb/7.0/plans/standard-16c64g400/bind.yaml b/addons/mongodb/7.0/plans/standard-16c64g400/bind.yaml index 13939fc0..a8ce0395 100644 --- a/addons/mongodb/7.0/plans/standard-16c64g400/bind.yaml +++ b/addons/mongodb/7.0/plans/standard-16c64g400/bind.yaml @@ -10,6 +10,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} {{- end }} + {{- range $i, $e := until $replicaCount }} - name: {{ printf "HOSTNAME_%d" $i }} valueFrom: @@ -17,6 +18,13 @@ credential: name: {{ printf "%s-%d" $fullName $i }} jsonpath: '{ .spec.clusterIP }' {{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' - 
name: PASSWORD valueFrom: secretKeyRef: diff --git a/addons/mongodb/7.0/plans/standard-1c2g10/bind.yaml b/addons/mongodb/7.0/plans/standard-1c2g10/bind.yaml index 13939fc0..a8ce0395 100644 --- a/addons/mongodb/7.0/plans/standard-1c2g10/bind.yaml +++ b/addons/mongodb/7.0/plans/standard-1c2g10/bind.yaml @@ -10,6 +10,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} {{- end }} + {{- range $i, $e := until $replicaCount }} - name: {{ printf "HOSTNAME_%d" $i }} valueFrom: @@ -17,6 +18,13 @@ credential: name: {{ printf "%s-%d" $fullName $i }} jsonpath: '{ .spec.clusterIP }' {{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' - name: PASSWORD valueFrom: secretKeyRef: diff --git a/addons/mongodb/7.0/plans/standard-2c4g20/bind.yaml b/addons/mongodb/7.0/plans/standard-2c4g20/bind.yaml index 13939fc0..a8ce0395 100644 --- a/addons/mongodb/7.0/plans/standard-2c4g20/bind.yaml +++ b/addons/mongodb/7.0/plans/standard-2c4g20/bind.yaml @@ -10,6 +10,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} {{- end }} + {{- range $i, $e := until $replicaCount }} - name: {{ printf "HOSTNAME_%d" $i }} valueFrom: @@ -17,6 +18,13 @@ credential: name: {{ printf "%s-%d" $fullName $i }} jsonpath: '{ .spec.clusterIP }' {{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' - name: PASSWORD valueFrom: secretKeyRef: diff --git a/addons/mongodb/7.0/plans/standard-2c8g50/bind.yaml b/addons/mongodb/7.0/plans/standard-2c8g50/bind.yaml index 13939fc0..a8ce0395 100644 --- 
a/addons/mongodb/7.0/plans/standard-2c8g50/bind.yaml +++ b/addons/mongodb/7.0/plans/standard-2c8g50/bind.yaml @@ -10,6 +10,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} {{- end }} + {{- range $i, $e := until $replicaCount }} - name: {{ printf "HOSTNAME_%d" $i }} valueFrom: @@ -17,6 +18,13 @@ credential: name: {{ printf "%s-%d" $fullName $i }} jsonpath: '{ .spec.clusterIP }' {{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' - name: PASSWORD valueFrom: secretKeyRef: diff --git a/addons/mongodb/7.0/plans/standard-32c128g800/bind.yaml b/addons/mongodb/7.0/plans/standard-32c128g800/bind.yaml index 13939fc0..a8ce0395 100644 --- a/addons/mongodb/7.0/plans/standard-32c128g800/bind.yaml +++ b/addons/mongodb/7.0/plans/standard-32c128g800/bind.yaml @@ -10,6 +10,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} {{- end }} + {{- range $i, $e := until $replicaCount }} - name: {{ printf "HOSTNAME_%d" $i }} valueFrom: @@ -17,6 +18,13 @@ credential: name: {{ printf "%s-%d" $fullName $i }} jsonpath: '{ .spec.clusterIP }' {{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' - name: PASSWORD valueFrom: secretKeyRef: diff --git a/addons/mongodb/7.0/plans/standard-4c16g100/bind.yaml b/addons/mongodb/7.0/plans/standard-4c16g100/bind.yaml index 13939fc0..a8ce0395 100644 --- a/addons/mongodb/7.0/plans/standard-4c16g100/bind.yaml +++ b/addons/mongodb/7.0/plans/standard-4c16g100/bind.yaml @@ -10,6 +10,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} {{- end }} + 
{{- range $i, $e := until $replicaCount }} - name: {{ printf "HOSTNAME_%d" $i }} valueFrom: @@ -17,6 +18,13 @@ credential: name: {{ printf "%s-%d" $fullName $i }} jsonpath: '{ .spec.clusterIP }' {{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' - name: PASSWORD valueFrom: secretKeyRef: diff --git a/addons/mongodb/7.0/plans/standard-8c32g200/bind.yaml b/addons/mongodb/7.0/plans/standard-8c32g200/bind.yaml index 13939fc0..a8ce0395 100644 --- a/addons/mongodb/7.0/plans/standard-8c32g200/bind.yaml +++ b/addons/mongodb/7.0/plans/standard-8c32g200/bind.yaml @@ -10,6 +10,7 @@ credential: jsonpath: '{ .status.loadBalancer.ingress[*].ip }' {{- end }} {{- end }} + {{- range $i, $e := until $replicaCount }} - name: {{ printf "HOSTNAME_%d" $i }} valueFrom: @@ -17,6 +18,13 @@ credential: name: {{ printf "%s-%d" $fullName $i }} jsonpath: '{ .spec.clusterIP }' {{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' - name: PASSWORD valueFrom: secretKeyRef: From 447384f923dd3b9e20dc063f05458a5fff75759f Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 19 May 2025 14:42:02 +0800 Subject: [PATCH 48/93] chore(airflow): update meta --- addons/airflow/2/meta.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/addons/airflow/2/meta.yaml b/addons/airflow/2/meta.yaml index 001bb24f..076efa04 100644 --- a/addons/airflow/2/meta.yaml +++ b/addons/airflow/2/meta.yaml @@ -21,6 +21,9 @@ allow_parameters: - name: "auth.password" required: false description: "auth.password config for values.yaml" +- name: "auth.fernetKey" + required: false + description: 
"auth.fernetKey config for values.yaml" - name: "git" required: false description: "git config for values.yaml" From 19e1df229861fbe8c044a7a0c889b183ff8c50cc Mon Sep 17 00:00:00 2001 From: duanhongyi Date: Mon, 19 May 2025 17:49:03 +0800 Subject: [PATCH 49/93] chore(scripts): change dist dir --- .gitignore | 2 +- Makefile | 1 - scripts/prepare-release-assets.sh | 4 ++-- scripts/push_release.sh | 16 ++++++++-------- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index 4817d62e..b097d5f2 100644 --- a/.gitignore +++ b/.gitignore @@ -44,7 +44,7 @@ Network Trash Folder Temporary Items .apdisk -toCopy/ +_dist/ out/ Chart.lock *.tgz diff --git a/Makefile b/Makefile index 82e0cceb..58febeec 100644 --- a/Makefile +++ b/Makefile @@ -33,4 +33,3 @@ ci-master: check prepare-assets .PHONY: ci-release ci-release: check prepare-assets push-release release-branch - diff --git a/scripts/prepare-release-assets.sh b/scripts/prepare-release-assets.sh index 8660274f..b9b3fafa 100755 --- a/scripts/prepare-release-assets.sh +++ b/scripts/prepare-release-assets.sh @@ -7,11 +7,11 @@ readonly RED='\033[0;31m' readonly NC='\033[0m' # No Color function prepareAssets() { - destination=toCopy/ + destination=_dist/ echo "Copy files" - mkdir -p toCopy + mkdir -p _dist # do not fail if there is no .md file cp addons/*.md ${destination} 2>/dev/null || true diff --git a/scripts/push_release.sh b/scripts/push_release.sh index 8693ad14..c88dcb74 100755 --- a/scripts/push_release.sh +++ b/scripts/push_release.sh @@ -28,22 +28,22 @@ if [ -z "$ASSET_UPLOAD_URL" ]; then fi # modify index.yaml, add created\digest field -if [ ! -f toCopy/index.yaml ]; then +if [ ! 
-f _dist/index.yaml ]; then echo "index.yaml does not exist" exit 1 fi timestamp=$(date "+%Y-%m-%d %H:%M:%S") -for FILE in `ls toCopy/*.tgz`; do - sha=$(sha256sum $FILE | awk '{ print $1 }') # FILE Example: toCopy/minio-2023.tgz - FILE=${FILE%%.tgz*} # remove ".tgz", Example: toCopy/minio-2023 - addonVersion=${FILE##*toCopy/} # remove "path, Example: minio-2023 +for FILE in `ls _dist/*.tgz`; do + sha=$(sha256sum $FILE | awk '{ print $1 }') # FILE Example: _dist/minio-2023.tgz + FILE=${FILE%%.tgz*} # remove ".tgz", Example: _dist/minio-2023 + addonVersion=${FILE##*_dist/} # remove "path, Example: minio-2023 addon=${addonVersion%-*} # remove after "-" , Example: minio version=${addonVersion##*-} # remove before "-", Example: 2023 - a=$addon v=$version t=$timestamp yq -i '(.entries.[env(a)][] | select(.version == env(v)) | .created) = env(t)' toCopy/index.yaml - a=$addon v=$version s=$sha yq -i '(.entries.[env(a)][] | select(.version == env(v)) | .digest) = env(s)' toCopy/index.yaml + a=$addon v=$version t=$timestamp yq -i '(.entries.[env(a)][] | select(.version == env(v)) | .created) = env(t)' _dist/index.yaml + a=$addon v=$version s=$sha yq -i '(.entries.[env(a)][] | select(.version == env(v)) | .digest) = env(s)' _dist/index.yaml done -for FILE in toCopy/*; do +for FILE in _dist/*; do echo "Uploading asset: $FILE to url: $ASSET_UPLOAD_URL?name=${FILE}" curl -s --data-binary @${FILE} -H "Content-Type: application/octet-stream" -H "Authorization: token ${GITHUB_TOKEN}" -X POST "$ASSET_UPLOAD_URL?name=$(basename ${FILE})" done \ No newline at end of file From d0275a9b2516ad3815f36a26c6e31525d6ec2ab3 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 29 May 2025 10:13:38 +0800 Subject: [PATCH 50/93] chore(postgresql-cluster): add extentions timescaledb (#103) --- .../15/chart/postgresql-cluster/values.yaml | 2 +- .../16/chart/postgresql-cluster-16/values.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml index 95d57e54..7d103fd1 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml @@ -90,7 +90,7 @@ preInitScript: | archive_timeout: 300s archive_command: sh /opt/drycc/postgresql/walbackup.sh %p # timescaledb.license: 'timescale' - shared_preload_libraries: 'auto_explain,pg_stat_statements' + shared_preload_libraries: 'auto_explain,pg_stat_statements,timescaledb' log_destination: 'csvlog' log_filename: postgresql.log logging_collector: on diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml index 9ab405f3..fcf73961 100644 --- a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml @@ -89,8 +89,8 @@ preInitScript: | archive_mode: "on" archive_timeout: 300s archive_command: sh /opt/drycc/postgresql/walbackup.sh %p - # timescaledb.license: 'timescale' - shared_preload_libraries: 'auto_explain,pg_stat_statements' + # timescaledb.license: 'timescale' + shared_preload_libraries: 'auto_explain,pg_stat_statements,timescaledb' log_destination: 'csvlog' log_filename: postgresql.log logging_collector: on From 20bef3127f2eec6e6ee84f7c3359e91141c981e6 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 29 May 2025 10:44:05 +0800 Subject: [PATCH 51/93] chore(mysql-cluster): reset default configuration parameters --- addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml | 3 ++- addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml | 4 +++- addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml | 3 ++- .../mysql-cluster/8.0/plans/standard-32c128g800/values.yaml | 3 ++- addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml | 3 ++- 
addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml | 3 ++- 6 files changed, 13 insertions(+), 6 deletions(-) diff --git a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml index e897821a..b7bfb0ea 100644 --- a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml @@ -19,7 +19,8 @@ primary: max_connect_errors=1000000 open_files_limit=2000000 performance_schema_max_table_instances=200 - thread_cache_size=200 + thread_cache_size=0 + innodb_flush_method=O_DIRECT innodb_read_io_threads=32 innodb_write_io_threads=32 innodb_buffer_pool_instances=16 diff --git a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml index 40a62583..5c34d122 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml @@ -19,11 +19,13 @@ primary: max_connect_errors=1000000 open_files_limit=2000000 performance_schema_max_table_instances=200 - thread_cache_size=200 + thread_cache_size=0 + innodb_flush_method=O_DIRECT innodb_read_io_threads=4 innodb_write_io_threads=4 innodb_buffer_pool_instances=2 innodb_buffer_pool_size=2147483648 + group_replication_message_cache_size=536870912 max_connections=1000 resources: limits: diff --git a/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml index 5cd7245b..98bc94c8 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml @@ -19,7 +19,8 @@ primary: max_connect_errors=1000000 open_files_limit=2000000 performance_schema_max_table_instances=200 - thread_cache_size=200 + thread_cache_size=0 + innodb_flush_method=O_DIRECT innodb_read_io_threads=4 innodb_write_io_threads=4 innodb_buffer_pool_instances=2 diff --git 
a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml index 92916170..d854cec1 100644 --- a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml @@ -19,7 +19,8 @@ primary: max_connect_errors=1000000 open_files_limit=2000000 performance_schema_max_table_instances=200 - thread_cache_size=200 + thread_cache_size=0 + innodb_flush_method=O_DIRECT innodb_read_io_threads=64 innodb_write_io_threads=64 innodb_buffer_pool_instances=32 diff --git a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml index 9fdcd02b..6312e572 100644 --- a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml @@ -19,7 +19,8 @@ primary: max_connect_errors=1000000 open_files_limit=2000000 performance_schema_max_table_instances=200 - thread_cache_size=200 + thread_cache_size=0 + innodb_flush_method=O_DIRECT innodb_read_io_threads=8 innodb_write_io_threads=8 innodb_buffer_pool_instances=4 diff --git a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml index 5bd7daef..7c7f49a0 100644 --- a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml @@ -19,7 +19,8 @@ primary: max_connect_errors=1000000 open_files_limit=2000000 performance_schema_max_table_instances=200 - thread_cache_size=200 + thread_cache_size=0 + innodb_flush_method=O_DIRECT innodb_read_io_threads=16 innodb_write_io_threads=16 innodb_buffer_pool_instances=8 From 9e0254906d0d3ec7b42429ac27d099550c3420fe Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 30 May 2025 15:48:33 +0800 Subject: [PATCH 52/93] feat(airflow): add version 3 --- addons/airflow/3/chart/airflow-3/.helmignore | 21 + 
addons/airflow/3/chart/airflow-3/Chart.yaml | 27 + addons/airflow/3/chart/airflow-3/README.md | 1577 ++++++++ .../3/chart/airflow-3/templates/NOTES.txt | 132 + .../3/chart/airflow-3/templates/_helpers.tpl | 638 ++++ .../templates/_init_containers_sidecars.tpl | 626 +++ .../templates/config/configmap-pip.yaml | 25 + .../config/configmap-pod-template.yaml | 270 ++ .../airflow-3/templates/config/configmap.yaml | 89 + .../config/secret-external-broker.yaml | 24 + .../templates/config/secret-external-db.yaml | 19 + .../templates/config/secret-ldap.yaml | 19 + .../templates/config/secret-ssh.yaml | 24 + .../airflow-3/templates/config/secret.yaml | 39 + .../templates/dag-processor/deployment.yaml | 272 ++ .../templates/dag-processor/hpa.yaml | 41 + .../dag-processor/networkpolicy.yaml | 76 + .../dag-processor/poddisruptionbudget.yaml | 25 + .../templates/dag-processor/vpa.yaml | 44 + .../chart/airflow-3/templates/extra-list.yaml | 9 + .../templates/metrics/configmap.yaml | 113 + .../templates/metrics/deployment.yaml | 143 + .../templates/metrics/networkpolicy.yaml | 68 + .../airflow-3/templates/metrics/service.yaml | 38 + .../templates/metrics/servicemonitor.yaml | 50 + .../chart/airflow-3/templates/rbac/role.yaml | 45 + .../airflow-3/templates/rbac/rolebinding.yaml | 24 + .../templates/rbac/serviceaccount.yaml | 18 + .../templates/scheduler/deployment.yaml | 312 ++ .../airflow-3/templates/scheduler/hpa.yaml | 41 + .../templates/scheduler/networkpolicy.yaml | 77 + .../scheduler/poddisruptionbudget.yaml | 25 + .../templates/scheduler/service-headless.yaml | 26 + .../airflow-3/templates/scheduler/vpa.yaml | 44 + .../airflow-3/templates/setup-db-job.yaml | 199 + .../airflow-3/templates/triggerer/hpa.yaml | 41 + .../templates/triggerer/networkpolicy.yaml | 75 + .../triggerer/poddisruptionbudget.yaml | 25 + .../templates/triggerer/service.yaml | 54 + .../templates/triggerer/statefulset.yaml | 324 ++ .../airflow-3/templates/triggerer/vpa.yaml | 44 + 
.../chart/airflow-3/templates/web/certs.yaml | 86 + .../airflow-3/templates/web/configmap.yaml | 63 + .../airflow-3/templates/web/deployment.yaml | 346 ++ .../3/chart/airflow-3/templates/web/hpa.yaml | 41 + .../templates/web/ingress-tls-secret.yaml | 44 + .../airflow-3/templates/web/ingress.yaml | 55 + .../templates/web/networkpolicy.yaml | 77 + .../templates/web/poddisruptionbudget.yaml | 25 + .../airflow-3/templates/web/service.yaml | 52 + .../airflow-3/templates/web/tls-secret.yaml | 45 + .../3/chart/airflow-3/templates/web/vpa.yaml | 44 + .../chart/airflow-3/templates/worker/hpa.yaml | 41 + .../templates/worker/networkpolicy.yaml | 77 + .../templates/worker/poddisruptionbudget.yaml | 25 + .../templates/worker/service-headless.yaml | 25 + .../templates/worker/statefulset.yaml | 358 ++ .../chart/airflow-3/templates/worker/vpa.yaml | 44 + addons/airflow/3/chart/airflow-3/values.yaml | 3346 +++++++++++++++++ addons/airflow/3/meta.yaml | 111 + .../3/plans/standard-16c48g2w/bind.yaml | 43 + .../create-instance-schema.json | 12 + .../3/plans/standard-16c48g2w/meta.yaml | 6 + .../3/plans/standard-16c48g2w/values.yaml | 53 + .../airflow/3/plans/standard-1c2g2w/bind.yaml | 43 + .../create-instance-schema.json | 12 + .../airflow/3/plans/standard-1c2g2w/meta.yaml | 6 + .../3/plans/standard-1c2g2w/values.yaml | 53 + .../3/plans/standard-24c64g7w/bind.yaml | 43 + .../create-instance-schema.json | 12 + .../3/plans/standard-24c64g7w/meta.yaml | 6 + .../3/plans/standard-24c64g7w/values.yaml | 53 + .../airflow/3/plans/standard-2c4g2w/bind.yaml | 43 + .../create-instance-schema.json | 12 + .../airflow/3/plans/standard-2c4g2w/meta.yaml | 6 + .../3/plans/standard-2c4g2w/values.yaml | 53 + .../3/plans/standard-4c16g2w/bind.yaml | 43 + .../create-instance-schema.json | 12 + .../3/plans/standard-4c16g2w/meta.yaml | 6 + .../3/plans/standard-4c16g2w/values.yaml | 53 + .../airflow/3/plans/standard-4c8g2w/bind.yaml | 43 + .../create-instance-schema.json | 12 + 
.../airflow/3/plans/standard-4c8g2w/meta.yaml | 6 + .../3/plans/standard-4c8g2w/values.yaml | 53 + .../3/plans/standard-8c32g2w/bind.yaml | 43 + .../create-instance-schema.json | 12 + .../3/plans/standard-8c32g2w/meta.yaml | 6 + .../3/plans/standard-8c32g2w/values.yaml | 53 + addons/index.yaml | 2 + 89 files changed, 11488 insertions(+) create mode 100644 addons/airflow/3/chart/airflow-3/.helmignore create mode 100644 addons/airflow/3/chart/airflow-3/Chart.yaml create mode 100644 addons/airflow/3/chart/airflow-3/README.md create mode 100644 addons/airflow/3/chart/airflow-3/templates/NOTES.txt create mode 100644 addons/airflow/3/chart/airflow-3/templates/_helpers.tpl create mode 100644 addons/airflow/3/chart/airflow-3/templates/_init_containers_sidecars.tpl create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/configmap-pip.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/configmap-pod-template.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/configmap.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/secret-external-broker.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/secret-external-db.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/secret-ldap.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/secret-ssh.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/config/secret.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/dag-processor/deployment.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/dag-processor/hpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/dag-processor/networkpolicy.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/dag-processor/poddisruptionbudget.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/dag-processor/vpa.yaml create mode 100644 
addons/airflow/3/chart/airflow-3/templates/extra-list.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/metrics/configmap.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/metrics/deployment.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/metrics/networkpolicy.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/metrics/service.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/metrics/servicemonitor.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/rbac/role.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/rbac/rolebinding.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/rbac/serviceaccount.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/scheduler/deployment.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/scheduler/hpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/scheduler/networkpolicy.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/scheduler/poddisruptionbudget.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/scheduler/service-headless.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/scheduler/vpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/triggerer/hpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/triggerer/networkpolicy.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/triggerer/poddisruptionbudget.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/triggerer/service.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/triggerer/statefulset.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/triggerer/vpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/certs.yaml create mode 100644 
addons/airflow/3/chart/airflow-3/templates/web/configmap.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/deployment.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/hpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/ingress-tls-secret.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/ingress.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/networkpolicy.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/poddisruptionbudget.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/service.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/tls-secret.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/web/vpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/worker/hpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/worker/networkpolicy.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/worker/poddisruptionbudget.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/worker/service-headless.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml create mode 100644 addons/airflow/3/chart/airflow-3/templates/worker/vpa.yaml create mode 100644 addons/airflow/3/chart/airflow-3/values.yaml create mode 100644 addons/airflow/3/meta.yaml create mode 100644 addons/airflow/3/plans/standard-16c48g2w/bind.yaml create mode 100644 addons/airflow/3/plans/standard-16c48g2w/create-instance-schema.json create mode 100644 addons/airflow/3/plans/standard-16c48g2w/meta.yaml create mode 100644 addons/airflow/3/plans/standard-16c48g2w/values.yaml create mode 100644 addons/airflow/3/plans/standard-1c2g2w/bind.yaml create mode 100644 addons/airflow/3/plans/standard-1c2g2w/create-instance-schema.json create mode 100644 addons/airflow/3/plans/standard-1c2g2w/meta.yaml create mode 100644 
addons/airflow/3/plans/standard-1c2g2w/values.yaml create mode 100644 addons/airflow/3/plans/standard-24c64g7w/bind.yaml create mode 100644 addons/airflow/3/plans/standard-24c64g7w/create-instance-schema.json create mode 100644 addons/airflow/3/plans/standard-24c64g7w/meta.yaml create mode 100644 addons/airflow/3/plans/standard-24c64g7w/values.yaml create mode 100644 addons/airflow/3/plans/standard-2c4g2w/bind.yaml create mode 100644 addons/airflow/3/plans/standard-2c4g2w/create-instance-schema.json create mode 100644 addons/airflow/3/plans/standard-2c4g2w/meta.yaml create mode 100644 addons/airflow/3/plans/standard-2c4g2w/values.yaml create mode 100644 addons/airflow/3/plans/standard-4c16g2w/bind.yaml create mode 100644 addons/airflow/3/plans/standard-4c16g2w/create-instance-schema.json create mode 100644 addons/airflow/3/plans/standard-4c16g2w/meta.yaml create mode 100644 addons/airflow/3/plans/standard-4c16g2w/values.yaml create mode 100644 addons/airflow/3/plans/standard-4c8g2w/bind.yaml create mode 100644 addons/airflow/3/plans/standard-4c8g2w/create-instance-schema.json create mode 100644 addons/airflow/3/plans/standard-4c8g2w/meta.yaml create mode 100644 addons/airflow/3/plans/standard-4c8g2w/values.yaml create mode 100644 addons/airflow/3/plans/standard-8c32g2w/bind.yaml create mode 100644 addons/airflow/3/plans/standard-8c32g2w/create-instance-schema.json create mode 100644 addons/airflow/3/plans/standard-8c32g2w/meta.yaml create mode 100644 addons/airflow/3/plans/standard-8c32g2w/values.yaml diff --git a/addons/airflow/3/chart/airflow-3/.helmignore b/addons/airflow/3/chart/airflow-3/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/addons/airflow/3/chart/airflow-3/Chart.yaml b/addons/airflow/3/chart/airflow-3/Chart.yaml new file mode 100644 index 00000000..1619d4b8 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/Chart.yaml @@ -0,0 +1,27 @@ +# Copyright Drycc Community. +# SPDX-License-Identifier: APACHE-2.0 + +annotations: + category: WorkFlow + licenses: Apache-2.0 +apiVersion: v2 +appVersion: "3.0.1" +dependencies: +- name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.4 +description: Apache Airflow is a tool to express and execute workflows as directed acyclic graphs (DAGs). It includes utilities to schedule tasks, monitor task progress and handle task dependencies. +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/airflow/img/airflow-stack-220x234.png +keywords: +- apache +- airflow +- workflow +- dag +maintainers: +- name: Drycc Community. + url: https://github.com/bitnami/charts +name: airflow +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/airflow +version: 16.1.7 diff --git a/addons/airflow/3/chart/airflow-3/README.md b/addons/airflow/3/chart/airflow-3/README.md new file mode 100644 index 00000000..a7a9a39d --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/README.md @@ -0,0 +1,1577 @@ + + +# Bitnami package for Apache Airflow + +Apache Airflow is a tool to express and execute workflows as directed acyclic graphs (DAGs). It includes utilities to schedule tasks, monitor task progress and handle task dependencies. + +[Overview of Apache Airflow](https://airflow.apache.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. 
+ +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/airflow +``` + +Looking to use Apache Airflow in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. + +## Introduction + +This chart bootstraps an [Apache Airflow](https://github.com/bitnami/containers/tree/main/bitnami/airflow) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/airflow +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The command deploys Apache Airflow on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Configuration and installation details + +### Executors + +Airflow supports different [Executors](https://www.astronomer.io/docs/learn/airflow-executors-explained) and this Helm chart provides support for several of them. You can choose the executor you want to use by setting the `executor` parameter. + +#### CeleryExecutor + +The Celery executor (default one) uses a message queue system (Redis® in this case) to coordinate tasks between pre-configured workers. 
+ +#### KubernetesExecutor + +The Kubernetes executor creates a new worker pod for every task instance using the `pod_template.yaml` that you can find at [templates/config/configmap.yaml](https://github.com/bitnami/charts/blob/main/bitnami/airflow/templates/config/configmap.yaml). This template can be overwritten using `worker.podTemplate`. To enable `KubernetesExecutor` you can set the following parameters: + +```console +executor=KubernetesExecutor +rbac.create=true +serviceAccount.create=true +redis.enabled=false +``` + +> NOTE: Redis® is not needed to be deployed when using KubernetesExecutor so you can disable it using `redis.enabled=false`. + +#### CeleryKubernetesExecutor + +The CeleryKubernetesExecutor (introduced in Airflow 2.0) is a combination of both the Celery and the Kubernetes executors. Tasks will be executed using Celery by default, but those tasks that require it can be executed in a Kubernetes pod using the 'kubernetes' queue. + +> The `CeleryKubernetesExecutor` has been deprecated starting with Airflow 3.0.0. + +#### LocalExecutor + +The Local executor runs tasks by spawning processes in the Scheduler pods. To enable `LocalExecutor` set the following parameters. + +```console +executor=LocalExecutor +redis.enabled=false +``` + +#### LocalKubernetesExecutor + +The LocalKubernetesExecutor (introduced in Airflow 2.3) is a combination of both the Local and the Kubernetes executors. Tasks will be executed in the scheduler by default, but those tasks that require it can be executed in a Kubernetes pod using the 'kubernetes' queue. + +> The `LocalKubernetesExecutor` has been deprecated starting with Airflow 3.0.0. + +#### SequentialExecutor + +This executor will only run one task instance at a time in the Scheduler pods. For production use case, please use other executors. To enable `SequentialExecutor` set the following parameters. 
+ +```console +executor=SequentialExecutor +redis.enabled=false +``` + +> The `SequentialExecutor` has been deprecated starting with Airflow 3.0.0. + +### Update credentials + +Bitnami charts configure credentials at first boot. Any further change in the secrets or credentials requires manual intervention. Follow these instructions: + +- Update the user password following [the upstream documentation](https://airflow.apache.org/docs/apache-airflow-providers-fab/stable/cli-ref.html#reset-password) +- Update the password secret with the new values (replace the SECRET_NAME, PASSWORD, FERNET_KEY and SECRET_KEY placeholders) + +```shell +kubectl create secret generic SECRET_NAME --from-literal=airflow-password=PASSWORD --from-literal=airflow-fernet-key=FERNET_KEY --from-literal=airflow-secret-key=SECRET_KEY --from-literal=airflow-jwt-secret-key=JWT_SECRET_KEY --dry-run -o yaml | kubectl apply -f - +``` + +### Airflow configuration file + +By default, the Airflow configuration file is auto-generated based on the chart parameters you set. For instance, the `executor` parameter will be used to set the `executor` class under the `[core]` section. + +You can also provide your own configuration by setting the `configuration` parameter. This parameter expects the configuration as a sections/keys/values dictionary in YAML format, then it's converted to .cfg format by the chart. For instance, using a configuration like the one below... + +```yaml +configuration: + core: + dags_folder: "/opt/drycc/airflow/dags" +``` + +... the chart will translate it to the following configuration file: + +```ini +[core] +dags_folder = "/opt/drycc/airflow/dags" +``` + +As an alternative to providing the whole configuration, you can also extend the default configuration using the `overrideConfiguration` parameter. The values set in this parameter, which also expects YAML format, will be merged with the default configuration or those set in the `configuration` parameter taking precedence.
+ +### Scaling worker pods + +Sometimes when using large workloads a fixed number of worker pods may make tasks take a long time to be executed. This chart provides two ways for scaling worker pods. + +- If you are using `KubernetesExecutor` auto scaling pods would be done by the Scheduler without adding anything more. +- If you are using `CeleryExecutor` you would have to enable `worker.autoscaling` to do so, please, set the following parameters. It will use autoscaling by default configuration that you can change using `worker.autoscaling.replicas.*` and `worker.autoscaling.targets.*`. + +```console +worker.autoscaling.enabled=true +worker.resources.requests.cpu=200m +worker.resources.requests.memory=250Mi +``` + +### Generate a Fernet key + +A Fernet key is required in order to encrypt passwords within connections. The Fernet key must be a base64-encoded 32-byte key. + +Learn how to generate one [here](https://airflow.apache.org/docs/apache-airflow/stable/security/secrets/fernet.html#generating-fernet-key). + +### Generate a Secret key + +Secret key used to run your Flask app. It should be as random as possible. + +> Note: when running multiple Webserver instances, make sure all of them use the same secret key. Otherwise you may face the error "CSRF session token is missing". + +### Load DAG files + +There are two different ways to load your custom DAG files into the Airflow chart. All of them are compatible so you can use more than one at the same time. + +#### Option 1: Specify an existing config map + +You can manually create a config map containing all your DAG files and then pass the name when deploying Airflow chart. For that, you can set the parameters below: + +```console +dags.enabled=true +dags.existingConfigmap=my-dags-configmap +``` + +#### Option 2: Get your DAG files from a git repository + +You can store all your DAG files on GitHub repositories and then clone to the Airflow pods with an initContainer.
The repositories will be periodically updated using a sidecar container. In order to do that, you can deploy airflow with the following options: + +> Note: When enabling git synchronization, an init container and sidecar container will be added for all the pods running airflow, this will allow scheduler, worker and web component to reach dags if it was needed. + +```console +dags.enabled=true +dags.repositories[0].repository=https://github.com/USERNAME/REPOSITORY +dags.repositories[0].name=REPO-IDENTIFIER +dags.repositories[0].branch=master +``` + +If you use a private repository from GitHub, a possible option to clone the files is using a [Personal Access Token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and using it as part of the URL: `https://USERNAME:PERSONAL_ACCESS_TOKEN@github.com/USERNAME/REPOSITORY`. Alternatively, you can clone the repository using SSH, to do so, you can set your private SSH Key setting the `dags.sshKey` parameter or use an existing secret containing your private SSH key setting the `dags.existingSshKeySecret` and `dags.existingSshKeySecretKey` parameters. + +### Loading Plugins + +You can load plugins into the chart by specifying a git repository containing the plugin files. The repository will be periodically updated using a sidecar container. In order to do that, you can deploy airflow with the following options: + +> Note: When enabling git synchronization, an init container and sidecar container will be added for all the pods running airflow, this will allow scheduler, worker and web component to reach plugins if it was needed. 
+ +```console +plugins.enabled=true +plugins.repositories[0].repository=https://github.com/teamclairvoyant/airflow-rest-api-plugin.git +plugins.repositories[0].branch=v1.0.9-branch +plugins.repositories[0].path=plugins +``` + +### Install extra python packages + +This chart allows you to mount volumes using `extraVolumes` and `extraVolumeMounts` in every component (web, scheduler, worker). Mounting a `requirements.txt` using these options to `/drycc/python/requirements.txt` will execute `pip install -r /drycc/python/requirements.txt` on container start + +### Existing Secrets + +You can use an existing secret to configure your Airflow auth, external Postgres, and external Redis® passwords: + +```console +postgresql.enabled=false +externalDatabase.host=my.external.postgres.host +externalDatabase.user=bn_airflow +externalDatabase.database=bitnami_airflow +externalDatabase.existingSecret=all-my-secrets +externalDatabase.existingSecretPasswordKey=postgresql-password + +redis.enabled=false +externalRedis.host=my.external.redis.host +externalRedis.existingSecret=all-my-secrets +externalRedis.existingSecretPasswordKey=redis-password + +auth.existingSecret=all-my-secrets +``` + +The expected secret resource looks as follows: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: all-my-secrets +type: Opaque +data: + airflow-password: "Smo1QTJLdGxXMg==" + airflow-fernet-key: "YVRZeVJVWnlXbU4wY1dOalVrdE1SV3cxWWtKeFIzWkVRVTVrVjNaTFR6WT0=" + airflow-secret-key: "a25mQ1FHTUh3MnFRSk5KMEIyVVU2YmN0VGRyYTVXY08=" + postgresql-password: "cG9zdGdyZXMK" + redis-password: "cmVkaXMK" +``` + +This is useful if you plan on using [Bitnami's sealed secrets](https://github.com/bitnami-labs/sealed-secrets) to manage your passwords. + +Alternatively, you can also use a SQL connection string to connect to an external database. 
+ This can be done by: + +- Setting the `externalDatabase.sqlConnection` parameter: + +```console +postgresql.enabled=false +externalDatabase.sqlConnection=postgresql://user:password@host:port/dbname +``` + +- Or via the `externalDatabase.existingSecret` and `externalDatabase.existingSecretSqlConnectionKey` parameters: + +```console +postgresql.enabled=false +externalDatabase.existingSecret=db-secret +externalDatabase.existingSecretSqlConnectionKey=sql-connection +``` + +### Database setup + +By default, this chart sets up the database (init or migrate the schema) and creates the admin user using a K8s job that is created when the chart release is installed or upgraded, and deleted once it succeeds. This job uses [Chart hooks](https://helm.sh/docs/topics/charts_hooks), so it won't be deleted if you're using Helm exclusively for its rendering capabilities (e.g. when using ArgoCD or FluxCD). + +Alternatively, you can disable this behavior by setting the `setupDBJob.enabled` parameter to `false`. In this case, the database setup and admin user creation will be done during the Webserver startup. + +### Resource requests and limits + +Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs.
Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### Prometheus metrics + +This chart can be integrated with Prometheus by setting `metrics.enabled` to `true`. This will configure Airflow components to send StatsD metrics to the [StatsD exporter](https://github.com/prometheus/statsd_exporter) that transforms them into Prometheus metrics. The StatsD exporter is deployed as a standalone deployment and service in the same namespace as the Airflow deployment. + +#### Prometheus requirements + +It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster. + +#### Integration with Prometheus Operator + +The chart can deploy `ServiceMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `metrics.serviceMonitor.enabled=true`. Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error: + +```text +no matches for kind "ServiceMonitor" in version "monitoring.coreos.com/v1" +``` + +Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) for having the necessary CRDs and the Prometheus Operator. + +### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html) + +It is strongly recommended to use immutable tags in a production environment. 
This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Ingress + +This chart provides support for Ingress resources. If you have an ingress controller installed on your cluster, such as [nginx-ingress-controller](https://github.com/bitnami/charts/tree/main/bitnami/nginx-ingress-controller) or [contour](https://github.com/bitnami/charts/tree/main/bitnami/contour) you can utilize the ingress controller to serve your application. + +To enable Ingress integration, set `ingress.enabled` to `true`. + +The most common scenario is to have one host name mapped to the deployment. In this case, the `ingress.hostname` property can be used to set the host name. The `ingress.tls` parameter can be used to add the TLS configuration for this host. However, it is also possible to have more than one host. To facilitate this, the `ingress.extraHosts` parameter (if available) can be set with the host names specified as an array. The `ingress.extraTLS` parameter (if available) can also be used to add the TLS configuration for extra hosts. + +> NOTE: For each host specified in the `ingress.extraHosts` parameter, it is necessary to set a name, path, and any annotations that the Ingress controller should know about. Not all annotations are supported by all Ingress controllers, but [this annotation reference document](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md) lists the annotations supported by many popular Ingress controllers. + +Adding the TLS parameter (where available) will cause the chart to generate HTTPS URLs, and the application will be available on port 443. The actual TLS secrets do not have to be generated by this chart. 
However, if TLS is enabled, the Ingress record will not work until the TLS secret exists. + +[Learn more about Ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/). + +### Securing traffic using TLS + +By default, this chart assumes TLS is managed by the Ingress Controller and terminates the TLS connection in the Ingress Controller. This can be done by setting `ingress.enabled` and `ingress.tls` parameters to `true` as explained in the section above. However, it is possible to configure TLS encryption for the Airflow Webserver directly by setting the `web.tls.enabled` parameter to `true`. + +It is necessary to create a secret containing the TLS certificates and pass it to the chart via the `web.tls.existingSecret` parameter. The secret should contain a `tls.crt` and `tls.key` keys including the certificate and key files respectively. For example: + +```console +kubectl create secret generic web-tls-secret --from-file=./tls.crt --from-file=./tls.key +``` + +You can manually create the required TLS certificates or relying on the chart auto-generation capabilities. The chart supports two different ways to auto-generate the required certificates: + +- Using Helm capabilities. Enable this feature by setting `web.tls.autoGenerated.enabled` to `true` and `web.tls.autoGenerated.engine` to `helm`. +- Relying on CertManager (please note it's required to have CertManager installed in your K8s cluster). Enable this feature by setting `web.tls.autoGenerated.enabled` to `true` and `web.tls.autoGenerated.engine` to `cert-manager`. Please note it's supported to use an existing Issuer/ClusterIssuer for issuing the TLS certificates by setting the `web.tls.autoGenerated.certManager.existingIssuer` and `web.tls.autoGenerated.certManager.existingIssuerKind` parameters. 
+ +### Sidecars + +If additional containers are needed in the same pod as Apache Airflow (such as additional metrics or logging exporters), they can be defined using the `sidecars` parameter. + +```yaml +sidecars: +- name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter (where available), as shown in the example below: + +```yaml +service: + extraPorts: + - name: extraPort + port: 11311 + targetPort: 11311 +``` + +If additional init containers are needed in the same pod, they can be defined using the `initContainers` parameter. Here is an example: + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Learn more about [sidecar containers](https://kubernetes.io/docs/concepts/workloads/pods/) and [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/). + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Backup and restore + +To back up and restore Helm chart deployments on Kubernetes, you need to back up the persistent volumes from the source deployment and attach them to a new deployment using [Velero](https://velero.io/), a Kubernetes backup/restore tool. 
Find the instructions for using Velero in [this guide](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-backup-restore-deployments-velero-index.html). + +## Persistence + +The Bitnami Airflow chart relies on the PostgreSQL chart persistence. This means that Airflow does not persist anything. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.security.allowInsecureImages` | Allows skipping image verification | `false` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | +| `global.compatibility.omitEmptySeLinuxOptions` | If set to true, removes the seLinuxOptions from the securityContexts when it is set to an empty object | `false` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `apiVersions` | Override Kubernetes API versions reported by .Capabilities | `[]` | +| `nameOverride` | String to partially override common.names.name | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `usePasswordFiles` | Mount credentials as files instead of using environment variables | `true` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the chart release | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the chart release | `["infinity"]` | + +### Airflow common parameters + +| Name | Description | Value | +| --------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | +| `image.registry` | Airflow image registry | `REGISTRY_NAME` | +| `image.repository` | Airflow image repository | `REPOSITORY_NAME/airflow` | +| `image.digest` | Airflow image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Airflow image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Airflow image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | +| `auth.username` | Username to access web UI | `user` | +| `auth.password` | Password to access web UI | `""` | +| `auth.fernetKey` | Fernet key to secure connections | `""` | +| `auth.secretKey` | Secret key to run your flask app | `""` | +| `auth.jwtSecretKey` | JWT secret key to run your flask app | `""` | +| `auth.existingSecret` | Name of an existing secret to use for Airflow credentials | `""` | +| `executor` | Airflow executor. Allowed values: `LocalExecutor`, `CeleryExecutor`, `KubernetesExecutor`, `SequentialExecutor` (Airflow 2.x only), `CeleryKubernetesExecutor` (Airflow 2.x only), and `LocalKubernetesExecutor` (Airflow 2.x only) | `CeleryExecutor` | +| `loadExamples` | Switch to load some Airflow examples | `false` | +| `configuration` | Specify content for Airflow config file (auto-generated based on other parameters otherwise) | `{}` | +| `overrideConfiguration` | Airflow common configuration override. 
Values defined here takes precedence over the ones defined at `configuration` | `{}` | +| `localSettings` | Specify content for Airflow local settings (airflow_local_settings.py) | `""` | +| `existingConfigmap` | Name of an existing ConfigMap with the Airflow config file and, optionally, the local settings file | `""` | +| `dags.enabled` | Enable loading DAGs from a ConfigMap or Git repositories | `false` | +| `dags.existingConfigmap` | Name of an existing ConfigMap with all the DAGs files you want to load in Airflow | `""` | +| `dags.repositories` | Array of repositories from which to download DAG files | `[]` | +| `dags.sshKey` | SSH Private key used to clone/sync DAGs from Git repositories (ignored if dags.existingSshKeySecret is set) | `""` | +| `dags.existingSshKeySecret` | Name of a secret containing the SSH private key used to clone/sync DAGs from Git repositories | `""` | +| `dags.existingSshKeySecretKey` | Key in the existing secret containing the SSH private key | `""` | +| `plugins.enabled` | Enable loading plugins from Git repositories | `false` | +| `plugins.repositories` | Array of repositories from which to download plugins | `[]` | +| `plugins.sshKey` | SSH Private key used to clone/sync plugins from Git repositories (ignored if plugins.existingSshKeySecret is set) | `""` | +| `plugins.existingSshKeySecret` | Name of a secret containing the SSH private key used to clone/sync plugins from Git repositories | `""` | +| `plugins.existingSshKeySecretKey` | Key in the existing secret containing the SSH private key | `""` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.enabled` | Enabled "prepare-config" init-containers' Security Context | `true` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.seLinuxOptions` | Set SELinux options in "prepare-config" init-containers | `{}` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.runAsUser` | Set runAsUser in "prepare-config" init-containers' Security Context 
| `1001` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.runAsGroup` | Set runAsGroup in "prepare-config" init-containers' Security Context | `1001` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "prepare-config" init-containers' Security Context | `true` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "prepare-config" init-containers' Security Context | `true` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.privileged` | Set privileged in "prepare-config" init-containers' Security Context | `false` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "prepare-config" init-containers' Security Context | `false` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.add` | List of capabilities to be added in "prepare-config" init-containers | `[]` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "prepare-config" init-containers | `["ALL"]` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "prepare-config" init-containers | `RuntimeDefault` | +| `defaultInitContainers.prepareConfig.resourcesPreset` | Set Airflow "prepare-config" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.prepareConfig.resources is set (defaultInitContainers.prepareConfig.resources is recommended for production). 
| `nano` | +| `defaultInitContainers.prepareConfig.resources` | Set Airflow "prepare-config" init container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.enabled` | Enabled "wait-for-db-migrations" init-containers' Security Context | `true` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.seLinuxOptions` | Set SELinux options in "wait-for-db-migrations" init-containers | `{}` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.runAsUser` | Set runAsUser in "wait-for-db-migrations" init-containers' Security Context | `1001` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.runAsGroup` | Set runAsGroup in "wait-for-db-migrations" init-containers' Security Context | `1001` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "wait-for-db-migrations" init-containers' Security Context | `true` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "wait-for-db-migrations" init-containers' Security Context | `true` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.privileged` | Set privileged in "wait-for-db-migrations" init-containers' Security Context | `false` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "wait-for-db-migrations" init-containers' Security Context | `false` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.capabilities.add` | List of capabilities to be added in "wait-for-db-migrations" init-containers | `[]` | +| `defaultInitContainers.waitForDBMigrations.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "wait-for-db-migrations" init-containers | `["ALL"]` | +| 
`defaultInitContainers.waitForDBMigrations.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "wait-for-db-migrations" init-containers | `RuntimeDefault` | +| `defaultInitContainers.waitForDBMigrations.resourcesPreset` | Set Airflow "wait-for-db-migrations" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.waitForDBMigrations.resources is set (defaultInitContainers.waitForDBMigrations.resources is recommended for production). | `micro` | +| `defaultInitContainers.waitForDBMigrations.resources` | Set Airflow "wait-for-db-migrations" init container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `defaultInitContainers.loadDAGsPlugins.command` | Override cmd | `[]` | +| `defaultInitContainers.loadDAGsPlugins.args` | Override args | `[]` | +| `defaultInitContainers.loadDAGsPlugins.extraVolumeMounts` | Add extra volume mounts | `[]` | +| `defaultInitContainers.loadDAGsPlugins.extraEnvVars` | Add extra environment variables | `[]` | +| `defaultInitContainers.loadDAGsPlugins.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `defaultInitContainers.loadDAGsPlugins.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.enabled` | Enabled "load-dags-plugins" init-containers' Security Context | `true` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.seLinuxOptions` | Set SELinux options in "load-dags-plugins" init-containers | `{}` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.runAsUser` | Set runAsUser in "load-dags-plugins" init-containers' Security Context | `1001` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.runAsGroup` | Set runAsGroup in "load-dags-plugins" init-containers' Security Context | `1001` | +| 
`defaultInitContainers.loadDAGsPlugins.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "load-dags-plugins" init-containers' Security Context | `true` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "load-dags-plugins" init-containers' Security Context | `true` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.privileged` | Set privileged in "load-dags-plugins" init-containers' Security Context | `false` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "load-dags-plugins" init-containers' Security Context | `false` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.capabilities.add` | List of capabilities to be added in "load-dags-plugins" init-containers | `[]` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "load-dags-plugins" init-containers | `["ALL"]` | +| `defaultInitContainers.loadDAGsPlugins.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "load-dags-plugins" init-containers | `RuntimeDefault` | +| `defaultInitContainers.loadDAGsPlugins.resourcesPreset` | Set Airflow "load-dags-plugins" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.loadDAGsPlugins.resources is set (defaultInitContainers.loadDAGsPlugins.resources is recommended for production). 
| `nano` | +| `defaultInitContainers.loadDAGsPlugins.resources` | Set Airflow "load-dags-plugins" init container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `defaultSidecars.syncDAGsPlugins.interval` | Interval in seconds to pull the git repository containing the DAGs and/or plugins | `60` | +| `defaultSidecars.syncDAGsPlugins.command` | Override cmd | `[]` | +| `defaultSidecars.syncDAGsPlugins.args` | Override args | `[]` | +| `defaultSidecars.syncDAGsPlugins.extraVolumeMounts` | Add extra volume mounts | `[]` | +| `defaultSidecars.syncDAGsPlugins.extraEnvVars` | Add extra environment variables | `[]` | +| `defaultSidecars.syncDAGsPlugins.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `defaultSidecars.syncDAGsPlugins.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.enabled` | Enabled "sync-dags-plugins" sidecars' Security Context | `true` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.seLinuxOptions` | Set SELinux options in "sync-dags-plugins" sidecars | `{}` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.runAsUser` | Set runAsUser in "sync-dags-plugins" sidecars' Security Context | `1001` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.runAsGroup` | Set runAsGroup in "sync-dags-plugins" sidecars' Security Context | `1001` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "sync-dags-plugins" sidecars' Security Context | `true` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "sync-dags-plugins" sidecars' Security Context | `true` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.privileged` | Set privileged in "sync-dags-plugins" sidecars' Security Context | `false` | +| 
`defaultSidecars.syncDAGsPlugins.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "sync-dags-plugins" sidecars' Security Context | `false` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.capabilities.add` | List of capabilities to be added in "sync-dags-plugins" sidecars | `[]` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "sync-dags-plugins" sidecars | `["ALL"]` | +| `defaultSidecars.syncDAGsPlugins.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "sync-dags-plugins" sidecars | `RuntimeDefault` | +| `defaultSidecars.syncDAGsPlugins.resourcesPreset` | Set Airflow "sync-dags-plugins" sidecar resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultSidecars.syncDAGsPlugins.resources is set (defaultSidecars.syncDAGsPlugins.resources is recommended for production). | `nano` | +| `defaultSidecars.syncDAGsPlugins.resources` | Set Airflow "sync-dags-plugins" sidecar requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `extraEnvVars` | Add extra environment variables for all the Airflow pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables for all the Airflow pods | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables for all the Airflow pods | `""` | +| `extraEnvVarsSecrets` | List of secrets with extra environment variables for all the Airflow pods | `[]` | +| `sidecars` | Add additional sidecar containers to all the Airflow pods | `[]` | +| `initContainers` | Add additional init containers to all the Airflow pods | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for all the Airflow pods | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes for all the Airflow pods | `[]` | 
+ +### Airflow webserver parameters + +| Name | Description | Value | +| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `web.baseUrl` | URL used to access to Airflow webserver | `""` | +| `web.configuration` | Specify content for webserver_config.py (auto-generated based on other env. vars otherwise) | `""` | +| `web.extraConfiguration` | Specify extra content to be appended to default webserver_config.py (ignored if `web.configuration` or `web.existingConfigmap` are set) | `""` | +| `web.existingConfigmap` | Name of an existing config map containing the Airflow webserver config file | `""` | +| `web.tls.enabled` | Enable TLS configuration for Airflow webserver | `false` | +| `web.tls.autoGenerated.enabled` | Enable automatic generation of TLS certificates | `true` | +| `web.tls.autoGenerated.engine` | Mechanism to generate the certificates (allowed values: helm, cert-manager) | `helm` | +| `web.tls.autoGenerated.certManager.existingIssuer` | The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine) | `""` | +| `web.tls.autoGenerated.certManager.existingIssuerKind` | Existing Issuer kind, defaults to Issuer (only for `cert-manager` engine) | `""` | +| `web.tls.autoGenerated.certManager.keyAlgorithm` | Key algorithm for the certificates (only for `cert-manager` engine) | `RSA` | +| `web.tls.autoGenerated.certManager.keySize` | Key size for the certificates (only for `cert-manager` engine) | `2048` | +| `web.tls.autoGenerated.certManager.duration` | Duration for the certificates (only for `cert-manager` engine) | `2160h` | +| `web.tls.autoGenerated.certManager.renewBefore` | Renewal period for the certificates (only for `cert-manager` engine) | `360h` | +| `web.tls.ca` | CA 
certificate for TLS. Ignored if `tls.existingSecret` is set | `""` | +| `web.tls.cert` | TLS certificate for Airflow webserver. Ignored if `tls.master.existingSecret` is set | `""` | +| `web.tls.key` | TLS key for Airflow webserver. Ignored if `tls.master.existingSecret` is set | `""` | +| `web.tls.existingSecret` | The name of an existing Secret containing the Airflow webserver certificates for TLS | `""` | +| `web.command` | Override default container command (useful when using custom images) | `[]` | +| `web.args` | Override default container args (useful when using custom images) | `[]` | +| `web.extraEnvVars` | Array with extra environment variables to add Airflow webserver pods | `[]` | +| `web.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow webserver pods | `""` | +| `web.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow webserver pods | `""` | +| `web.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow webserver pods | `[]` | +| `web.containerPorts.http` | Airflow webserver HTTP container port | `8080` | +| `web.replicaCount` | Number of Airflow webserver replicas | `1` | +| `web.livenessProbe.enabled` | Enable livenessProbe on Airflow webserver containers | `true` | +| `web.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `web.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `web.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `web.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `web.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `web.readinessProbe.enabled` | Enable readinessProbe on Airflow webserver containers | `true` | +| `web.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `web.readinessProbe.periodSeconds` | Period seconds for 
readinessProbe | `10` | +| `web.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `web.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `web.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `web.startupProbe.enabled` | Enable startupProbe on Airflow webserver containers | `false` | +| `web.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `web.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `web.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `web.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `web.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `web.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `web.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `web.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `web.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if web.resources is set (web.resources is recommended for production). 
| `medium` | +| `web.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `web.podSecurityContext.enabled` | Enabled Airflow webserver pods' Security Context | `true` | +| `web.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `web.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `web.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `web.podSecurityContext.fsGroup` | Set Airflow webserver pod's Security Context fsGroup | `1001` | +| `web.containerSecurityContext.enabled` | Enabled Airflow webserver containers' Security Context | `true` | +| `web.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `web.containerSecurityContext.runAsUser` | Set Airflow webserver containers' Security Context runAsUser | `1001` | +| `web.containerSecurityContext.runAsGroup` | Set Airflow webserver containers' Security Context runAsGroup | `1001` | +| `web.containerSecurityContext.runAsNonRoot` | Set Airflow webserver containers' Security Context runAsNonRoot | `true` | +| `web.containerSecurityContext.privileged` | Set web container's Security Context privileged | `false` | +| `web.containerSecurityContext.allowPrivilegeEscalation` | Set web container's Security Context allowPrivilegeEscalation | `false` | +| `web.containerSecurityContext.readOnlyRootFilesystem` | Set web container's Security Context readOnlyRootFilesystem | `true` | +| `web.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `web.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `web.lifecycleHooks` | for the Airflow webserver container(s) to automate configuration before or after startup | `{}` | +| `web.automountServiceAccountToken` | Mount Service Account token in pod 
| `false` | +| `web.hostAliases` | Deployment pod host aliases | `[]` | +| `web.podLabels` | Add extra labels to the Airflow webserver pods | `{}` | +| `web.podAnnotations` | Add extra annotations to the Airflow webserver pods | `{}` | +| `web.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `web.affinity` | Affinity for Airflow webserver pods assignment (evaluated as a template) | `{}` | +| `web.nodeAffinityPreset.key` | Node label key to match. Ignored if `web.affinity` is set. | `""` | +| `web.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `web.nodeAffinityPreset.values` | Node label values to match. Ignored if `web.affinity` is set. | `[]` | +| `web.nodeSelector` | Node labels for Airflow webserver pods assignment | `{}` | +| `web.podAffinityPreset` | Pod affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `web.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `web.tolerations` | Tolerations for Airflow webserver pods assignment | `[]` | +| `web.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `web.priorityClassName` | Priority Class Name | `""` | +| `web.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `web.terminationGracePeriodSeconds` | Seconds Airflow webserver pod needs to terminate gracefully | `""` | +| `web.updateStrategy.type` | Airflow webserver deployment strategy type | `RollingUpdate` | +| `web.updateStrategy.rollingUpdate` | Airflow webserver deployment rolling update configuration parameters | `{}` | +| `web.sidecars` | Add additional sidecar containers to the Airflow webserver pods | `[]` | +| `web.initContainers` | Add additional init containers to the Airflow webserver pods | `[]` | +| `web.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow webserver pods | `[]` | +| `web.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow webserver pods | `[]` | +| `web.pdb.create` | Deploy a pdb object for the Airflow webserver pods | `true` | +| `web.pdb.minAvailable` | Minimum number/percentage of available Airflow webserver replicas | `""` | +| `web.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow webserver replicas | `""` | +| `web.autoscaling.vpa.enabled` | Enable VPA for Airflow webserver | `false` | +| `web.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `web.autoscaling.vpa.controlledResources` | List of resources that the VPA can control. 
Defaults to cpu and memory | `[]` | +| `web.autoscaling.vpa.maxAllowed` | VPA max allowed resources for the pod | `{}` | +| `web.autoscaling.vpa.minAllowed` | VPA min allowed resources for the pod | `{}` | +| `web.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `web.autoscaling.hpa.enabled` | Enable HPA for Airflow webserver | `false` | +| `web.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `web.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `web.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `web.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `web.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `web.networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `web.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `web.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `web.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `web.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `web.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Airflow scheduler parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `scheduler.replicaCount` | Number of scheduler replicas | `1` | +| `scheduler.command` | Override cmd | `[]` | +| `scheduler.args` | Override args | `[]` | +| `scheduler.extraEnvVars` | Add extra 
environment variables | `[]` | +| `scheduler.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `scheduler.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `scheduler.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow scheduler pods | `[]` | +| `scheduler.livenessProbe.enabled` | Enable livenessProbe on Airflow scheduler containers | `true` | +| `scheduler.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `scheduler.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `scheduler.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `15` | +| `scheduler.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `scheduler.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `scheduler.readinessProbe.enabled` | Enable readinessProbe on Airflow scheduler containers | `true` | +| `scheduler.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `scheduler.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `scheduler.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `15` | +| `scheduler.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `scheduler.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `scheduler.startupProbe.enabled` | Enable startupProbe on Airflow scheduler containers | `false` | +| `scheduler.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `scheduler.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `scheduler.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `scheduler.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `scheduler.startupProbe.successThreshold` | Success threshold for 
startupProbe | `1` | +| `scheduler.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `scheduler.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `scheduler.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if scheduler.resources is set (scheduler.resources is recommended for production). | `small` | +| `scheduler.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `scheduler.podSecurityContext.enabled` | Enabled Airflow scheduler pods' Security Context | `true` | +| `scheduler.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `scheduler.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `scheduler.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `scheduler.podSecurityContext.fsGroup` | Set Airflow scheduler pod's Security Context fsGroup | `1001` | +| `scheduler.containerSecurityContext.enabled` | Enabled Airflow scheduler containers' Security Context | `true` | +| `scheduler.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `scheduler.containerSecurityContext.runAsUser` | Set Airflow scheduler containers' Security Context runAsUser | `1001` | +| `scheduler.containerSecurityContext.runAsGroup` | Set Airflow scheduler containers' Security Context runAsGroup | `1001` | +| `scheduler.containerSecurityContext.runAsNonRoot` | Set Airflow scheduler containers' Security Context runAsNonRoot | `true` | +| `scheduler.containerSecurityContext.privileged` | Set scheduler container's Security Context privileged | `false` | +| 
`scheduler.containerSecurityContext.allowPrivilegeEscalation` | Set scheduler container's Security Context allowPrivilegeEscalation | `false` | +| `scheduler.containerSecurityContext.readOnlyRootFilesystem` | Set scheduler container's Security Context readOnlyRootFilesystem | `true` | +| `scheduler.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `scheduler.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `scheduler.lifecycleHooks` | for the Airflow scheduler container(s) to automate configuration before or after startup | `{}` | +| `scheduler.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `scheduler.hostAliases` | Deployment pod host aliases | `[]` | +| `scheduler.podLabels` | Add extra labels to the Airflow scheduler pods | `{}` | +| `scheduler.podAnnotations` | Add extra annotations to the Airflow scheduler pods | `{}` | +| `scheduler.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `scheduler.affinity` | Affinity for Airflow scheduler pods assignment (evaluated as a template) | `{}` | +| `scheduler.nodeAffinityPreset.key` | Node label key to match. Ignored if `scheduler.affinity` is set. | `""` | +| `scheduler.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `scheduler.nodeAffinityPreset.values` | Node label values to match. Ignored if `scheduler.affinity` is set. | `[]` | +| `scheduler.nodeSelector` | Node labels for Airflow scheduler pods assignment | `{}` | +| `scheduler.podAffinityPreset` | Pod affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `scheduler.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. 
| `soft` | +| `scheduler.tolerations` | Tolerations for Airflow scheduler pods assignment | `[]` | +| `scheduler.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `scheduler.priorityClassName` | Priority Class Name | `""` | +| `scheduler.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `scheduler.terminationGracePeriodSeconds` | Seconds Airflow scheduler pod needs to terminate gracefully | `""` | +| `scheduler.updateStrategy.type` | Airflow scheduler deployment strategy type | `RollingUpdate` | +| `scheduler.updateStrategy.rollingUpdate` | Airflow scheduler deployment rolling update configuration parameters | `{}` | +| `scheduler.sidecars` | Add additional sidecar containers to the Airflow scheduler pods | `[]` | +| `scheduler.initContainers` | Add additional init containers to the Airflow scheduler pods | `[]` | +| `scheduler.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow scheduler pods | `[]` | +| `scheduler.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow scheduler pods | `[]` | +| `scheduler.pdb.create` | Deploy a pdb object for the Airflow scheduler pods | `true` | +| `scheduler.pdb.minAvailable` | Minimum number/percentage of available Airflow scheduler replicas | `""` | +| `scheduler.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow scheduler replicas | `""` | +| `scheduler.autoscaling.vpa.enabled` | Enable VPA for Airflow scheduler | `false` | +| `scheduler.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `scheduler.autoscaling.vpa.controlledResources` | List of resources that the VPA can control. 
Defaults to cpu and memory | `[]` | +| `scheduler.autoscaling.vpa.maxAllowed` | VPA max allowed resources for the pod | `{}` | +| `scheduler.autoscaling.vpa.minAllowed` | VPA min allowed resources for the pod | `{}` | +| `scheduler.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `scheduler.autoscaling.hpa.enabled` | Enable HPA for Airflow scheduler | `false` | +| `scheduler.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `scheduler.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `scheduler.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `scheduler.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `scheduler.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `scheduler.networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `scheduler.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `scheduler.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `scheduler.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `scheduler.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `scheduler.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Airflow Dag Processor parameters + +| Name | Description | Value | +| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------- | +| `dagProcessor.enabled` | Run Airflow Dag Processor Manager as a standalone component | `true` | +| `dagProcessor.replicaCount` | Number of Airflow Dag Processor replicas | `1` | +| `dagProcessor.command` | Override default Airflow Dag Processor cmd | `[]` | +| `dagProcessor.args` | Override default Airflow Dag Processor args | `[]` | +| `dagProcessor.extraEnvVars` | Add extra environment variables to Airflow Dag Processor containers | `[]` | +| `dagProcessor.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `dagProcessor.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `dagProcessor.livenessProbe.enabled` | Enable livenessProbe on Airflow Dag Processor containers | `true` | +| `dagProcessor.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `dagProcessor.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `dagProcessor.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `15` | +| `dagProcessor.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| 
`dagProcessor.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `dagProcessor.readinessProbe.enabled` | Enable readinessProbe on Airflow Dag Processor containers | `true` | +| `dagProcessor.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `dagProcessor.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `dagProcessor.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `15` | +| `dagProcessor.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `dagProcessor.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `dagProcessor.startupProbe.enabled` | Enable startupProbe on Airflow Dag Processor containers | `false` | +| `dagProcessor.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `dagProcessor.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `dagProcessor.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `dagProcessor.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `dagProcessor.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `dagProcessor.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `dagProcessor.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `dagProcessor.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `dagProcessor.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if dagProcessor.resources is set (dagProcessor.resources is recommended for production). 
| `small` | +| `dagProcessor.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `dagProcessor.podSecurityContext.enabled` | Enabled Airflow Dag Processor pods' Security Context | `true` | +| `dagProcessor.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `dagProcessor.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `dagProcessor.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `dagProcessor.podSecurityContext.fsGroup` | Set Airflow Dag Processor pod's Security Context fsGroup | `1001` | +| `dagProcessor.containerSecurityContext.enabled` | Enabled Airflow Dag Processor containers' Security Context | `true` | +| `dagProcessor.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `dagProcessor.containerSecurityContext.runAsUser` | Set Airflow Dag Processor containers' Security Context runAsUser | `1001` | +| `dagProcessor.containerSecurityContext.runAsGroup` | Set Airflow Dag Processor containers' Security Context runAsGroup | `1001` | +| `dagProcessor.containerSecurityContext.runAsNonRoot` | Set Airflow Dag Processor containers' Security Context runAsNonRoot | `true` | +| `dagProcessor.containerSecurityContext.privileged` | Set Airflow Dag Processor container's Security Context privileged | `false` | +| `dagProcessor.containerSecurityContext.allowPrivilegeEscalation` | Set Airflow Dag Processor container's Security Context allowPrivilegeEscalation | `false` | +| `dagProcessor.containerSecurityContext.readOnlyRootFilesystem` | Set Airflow Dag Processor container's Security Context readOnlyRootFilesystem | `true` | +| `dagProcessor.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `dagProcessor.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp 
profile | `RuntimeDefault` | +| `dagProcessor.lifecycleHooks` | for the Airflow Dag Processor containers to automate configuration before or after startup | `{}` | +| `dagProcessor.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `dagProcessor.hostAliases` | Deployment pod host aliases | `[]` | +| `dagProcessor.podLabels` | Add extra labels to the Airflow Dag Processor pods | `{}` | +| `dagProcessor.podAnnotations` | Add extra annotations to the Airflow Dag Processor pods | `{}` | +| `dagProcessor.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `dagProcessor.affinity` | Affinity for Airflow Dag Processor pods assignment (evaluated as a template) | `{}` | +| `dagProcessor.nodeAffinityPreset.key` | Node label key to match. Ignored if `dagProcessor.affinity` is set. | `""` | +| `dagProcessor.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `dagProcessor.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dagProcessor.nodeAffinityPreset.values` | Node label values to match. Ignored if `dagProcessor.affinity` is set. | `[]` | +| `dagProcessor.nodeSelector` | Node labels for Airflow Dag Processor pods assignment | `{}` | +| `dagProcessor.podAffinityPreset` | Pod affinity preset. Ignored if `dagProcessor.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `dagProcessor.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `dagProcessor.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `dagProcessor.tolerations` | Tolerations for Airflow Dag Processor pods assignment | `[]` | +| `dagProcessor.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `dagProcessor.priorityClassName` | Priority Class Name | `""` | +| `dagProcessor.schedulerName` | Use an alternate K8s scheduler, e.g. "stork". 
| `""` | +| `dagProcessor.terminationGracePeriodSeconds` | Seconds Airflow Dag Processor pod needs to terminate gracefully | `""` | +| `dagProcessor.updateStrategy.type` | Airflow Dag Processor deployment strategy type | `RollingUpdate` | +| `dagProcessor.updateStrategy.rollingUpdate` | Airflow Dag Processor deployment rolling update configuration parameters | `{}` | +| `dagProcessor.sidecars` | Add additional sidecar containers to the Airflow Dag Processor pods | `[]` | +| `dagProcessor.initContainers` | Add additional init containers to the Airflow Dag Processor pods | `[]` | +| `dagProcessor.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow Dag Processor containers | `[]` | +| `dagProcessor.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow Dag Processor pods | `[]` | +| `dagProcessor.pdb.create` | Deploy a pdb object for the Airflow Dag Processor pods | `true` | +| `dagProcessor.pdb.minAvailable` | Minimum number/percentage of available Airflow Dag Processor replicas | `""` | +| `dagProcessor.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow Dag Processor replicas | `""` | +| `dagProcessor.autoscaling.vpa.enabled` | Enable VPA for Airflow Dag Processor | `false` | +| `dagProcessor.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `dagProcessor.autoscaling.vpa.controlledResources` | List of resources that the VPA can control. 
Defaults to cpu and memory | `[]` | +| `dagProcessor.autoscaling.vpa.maxAllowed` | VPA max allowed resources for the pod | `{}` | +| `dagProcessor.autoscaling.vpa.minAllowed` | VPA min allowed resources for the pod | `{}` | +| `dagProcessor.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `dagProcessor.autoscaling.hpa.enabled` | Enable HPA for Airflow Dag Processor | `false` | +| `dagProcessor.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `dagProcessor.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `dagProcessor.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `dagProcessor.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `dagProcessor.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `dagProcessor.networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `dagProcessor.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `dagProcessor.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `dagProcessor.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `dagProcessor.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `dagProcessor.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Airflow Triggerer parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `triggerer.enabled` | Run Airflow Triggerer as a standalone component | `true` | +| `triggerer.defaultCapacity` | How many triggers a single Triggerer can run at once | `1000` | +| `triggerer.replicaCount` | Number of Airflow Triggerer replicas | `1` | +| `triggerer.command` | Override default Airflow Triggerer cmd | `[]` | +| `triggerer.args` | Override default Airflow Triggerer args | `[]` | +| `triggerer.extraEnvVars` | Add extra environment variables to Airflow Triggerer containers | `[]` | +| `triggerer.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `triggerer.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `triggerer.containerPorts.logs` | Airflow Triggerer logs container port | `8794` | +| `triggerer.livenessProbe.enabled` | Enable livenessProbe on Airflow Triggerer containers | `true` | +| `triggerer.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `triggerer.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `triggerer.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `15` | +| 
`triggerer.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `triggerer.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `triggerer.readinessProbe.enabled` | Enable readinessProbe on Airflow Triggerer containers | `true` | +| `triggerer.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `triggerer.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `triggerer.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `15` | +| `triggerer.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `triggerer.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `triggerer.startupProbe.enabled` | Enable startupProbe on Airflow Triggerer containers | `false` | +| `triggerer.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `triggerer.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `triggerer.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `triggerer.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `triggerer.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `triggerer.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `triggerer.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `triggerer.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `triggerer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if triggerer.resources is set (triggerer.resources is recommended for production). 
| `small` | +| `triggerer.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `triggerer.podSecurityContext.enabled` | Enabled Airflow Triggerer pods' Security Context | `true` | +| `triggerer.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `triggerer.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `triggerer.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `triggerer.podSecurityContext.fsGroup` | Set Airflow Triggerer pod's Security Context fsGroup | `1001` | +| `triggerer.containerSecurityContext.enabled` | Enabled Airflow Triggerer containers' Security Context | `true` | +| `triggerer.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `triggerer.containerSecurityContext.runAsUser` | Set Airflow Triggerer containers' Security Context runAsUser | `1001` | +| `triggerer.containerSecurityContext.runAsGroup` | Set Airflow Triggerer containers' Security Context runAsGroup | `1001` | +| `triggerer.containerSecurityContext.runAsNonRoot` | Set Airflow Triggerer containers' Security Context runAsNonRoot | `true` | +| `triggerer.containerSecurityContext.privileged` | Set Airflow Triggerer container's Security Context privileged | `false` | +| `triggerer.containerSecurityContext.allowPrivilegeEscalation` | Set Airflow Triggerer container's Security Context allowPrivilegeEscalation | `false` | +| `triggerer.containerSecurityContext.readOnlyRootFilesystem` | Set Airflow Triggerer container's Security Context readOnlyRootFilesystem | `true` | +| `triggerer.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `triggerer.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `triggerer.lifecycleHooks` | for the Airflow Triggerer 
containers to automate configuration before or after startup | `{}` | +| `triggerer.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `triggerer.hostAliases` | Deployment pod host aliases | `[]` | +| `triggerer.podLabels` | Add extra labels to the Airflow Triggerer pods | `{}` | +| `triggerer.podAnnotations` | Add extra annotations to the Airflow Triggerer pods | `{}` | +| `triggerer.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `triggerer.affinity` | Affinity for Airflow Triggerer pods assignment (evaluated as a template) | `{}` | +| `triggerer.nodeAffinityPreset.key` | Node label key to match. Ignored if `triggerer.affinity` is set. | `""` | +| `triggerer.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `triggerer.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `triggerer.nodeAffinityPreset.values` | Node label values to match. Ignored if `triggerer.affinity` is set. | `[]` | +| `triggerer.nodeSelector` | Node labels for Airflow Triggerer pods assignment | `{}` | +| `triggerer.podAffinityPreset` | Pod affinity preset. Ignored if `triggerer.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `triggerer.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `triggerer.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `triggerer.tolerations` | Tolerations for Airflow Triggerer pods assignment | `[]` | +| `triggerer.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `triggerer.priorityClassName` | Priority Class Name | `""` | +| `triggerer.schedulerName` | Use an alternate K8s scheduler, e.g. "stork". 
| `""` | +| `triggerer.terminationGracePeriodSeconds` | Seconds Airflow Triggerer pod needs to terminate gracefully | `""` | +| `triggerer.podManagementPolicy` | Pod management policy for the Airflow Triggerer statefulset | `OrderedReady` | +| `triggerer.updateStrategy.type` | Airflow Triggerer statefulset strategy type | `RollingUpdate` | +| `triggerer.updateStrategy.rollingUpdate` | Airflow Triggerer statefulset rolling update configuration parameters | `{}` | +| `triggerer.sidecars` | Add additional sidecar containers to the Airflow Triggerer pods | `[]` | +| `triggerer.initContainers` | Add additional init containers to the Airflow Triggerer pods | `[]` | +| `triggerer.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow Triggerer containers | `[]` | +| `triggerer.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow Triggerer pods | `[]` | +| `triggerer.pdb.create` | Deploy a pdb object for the Airflow Triggerer pods | `true` | +| `triggerer.pdb.minAvailable` | Minimum number/percentage of available Airflow Triggerer replicas | `""` | +| `triggerer.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow Triggerer replicas | `""` | +| `triggerer.autoscaling.vpa.enabled` | Enable VPA for Airflow Triggerer | `false` | +| `triggerer.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `triggerer.autoscaling.vpa.controlledResources` | List of resources that the VPA can control. 
Defaults to cpu and memory | `[]` | +| `triggerer.autoscaling.vpa.maxAllowed` | VPA max allowed resources for the pod | `{}` | +| `triggerer.autoscaling.vpa.minAllowed` | VPA min allowed resources for the pod | `{}` | +| `triggerer.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `triggerer.autoscaling.hpa.enabled` | Enable HPA | `false` | +| `triggerer.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `triggerer.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `triggerer.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `triggerer.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `triggerer.persistence.enabled` | Enable logs persistence using Persistent Volume Claims | `true` | +| `triggerer.persistence.storageClass` | Storage class of backing PVC | `""` | +| `triggerer.persistence.annotations` | Additional Persistent Volume Claim annotations | `{}` | +| `triggerer.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `triggerer.persistence.size` | Size of logs volume | `8Gi` | +| `triggerer.persistence.selector` | Selector to match an existing Persistent Volume for the Airflow Triggerer logs PVC | `{}` | +| `triggerer.persistence.dataSource` | Custom PVC data source | `{}` | +| `triggerer.persistence.existingClaim` | The name of an existing PVC to use for persistence (only if triggerer.replicaCount=1) | `""` | +| `triggerer.persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` | +| `triggerer.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `triggerer.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | +| `triggerer.service.type` | Airflow Triggerer service 
type | `ClusterIP` | +| `triggerer.service.ports.logs` | Airflow Triggerer service logs port | `8794` | +| `triggerer.service.nodePorts.logs` | Node port for Airflow Triggerer service logs | `""` | +| `triggerer.service.clusterIP` | Airflow Triggerer service Cluster IP | `""` | +| `triggerer.service.loadBalancerIP` | Airflow Triggerer service Load Balancer IP | `""` | +| `triggerer.service.loadBalancerSourceRanges` | Airflow Triggerer service Load Balancer sources | `[]` | +| `triggerer.service.externalTrafficPolicy` | Airflow Triggerer service external traffic policy | `Cluster` | +| `triggerer.service.annotations` | Additional custom annotations for Airflow Triggerer service | `{}` | +| `triggerer.service.extraPorts` | Extra ports to expose in Airflow Triggerer service (normally used with the `triggerer.sidecars` value) | `[]` | +| `triggerer.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `triggerer.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `triggerer.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `triggerer.networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `triggerer.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `triggerer.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `triggerer.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `triggerer.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `triggerer.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Airflow worker parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `worker.command` | Override default container command (useful when using custom images) | `[]` | +| `worker.args` | Override default container args (useful when using custom images) | `[]` | +| `worker.extraEnvVars` | Array with extra environment variables to add Airflow worker pods | `[]` | +| `worker.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow worker pods | `""` | +| `worker.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow worker pods | `""` | +| `worker.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow worker pods | `[]` | +| `worker.containerPorts.http` | Airflow worker HTTP container port | `8793` | +| `worker.replicaCount` | Number of Airflow worker replicas | `1` | +| `worker.livenessProbe.enabled` | Enable livenessProbe on Airflow worker containers | `true` | +| `worker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `worker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `worker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| 
`worker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `worker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `worker.readinessProbe.enabled` | Enable readinessProbe on Airflow worker containers | `true` | +| `worker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `worker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `worker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `worker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `worker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `worker.startupProbe.enabled` | Enable startupProbe on Airflow worker containers | `false` | +| `worker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `worker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `worker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `worker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `worker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `worker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `worker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `worker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `worker.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if worker.resources is set (worker.resources is recommended for production). 
| `large` | +| `worker.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `worker.podSecurityContext.enabled` | Enabled Airflow worker pods' Security Context | `true` | +| `worker.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `worker.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `worker.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `worker.podSecurityContext.fsGroup` | Set Airflow worker pod's Security Context fsGroup | `1001` | +| `worker.containerSecurityContext.enabled` | Enabled Airflow worker containers' Security Context | `true` | +| `worker.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `worker.containerSecurityContext.runAsUser` | Set Airflow worker containers' Security Context runAsUser | `1001` | +| `worker.containerSecurityContext.runAsGroup` | Set Airflow worker containers' Security Context runAsGroup | `1001` | +| `worker.containerSecurityContext.runAsNonRoot` | Set Airflow worker containers' Security Context runAsNonRoot | `true` | +| `worker.containerSecurityContext.privileged` | Set worker container's Security Context privileged | `false` | +| `worker.containerSecurityContext.allowPrivilegeEscalation` | Set worker container's Security Context allowPrivilegeEscalation | `false` | +| `worker.containerSecurityContext.readOnlyRootFilesystem` | Set worker container's Security Context readOnlyRootFilesystem | `true` | +| `worker.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `worker.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `worker.lifecycleHooks` | for the Airflow worker container(s) to automate configuration before or after startup | `{}` | +| 
`worker.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `worker.hostAliases` | Deployment pod host aliases | `[]` | +| `worker.podLabels` | Add extra labels to the Airflow worker pods | `{}` | +| `worker.podAnnotations` | Add extra annotations to the Airflow worker pods | `{}` | +| `worker.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `worker.affinity` | Affinity for Airflow worker pods assignment (evaluated as a template) | `{}` | +| `worker.nodeAffinityPreset.key` | Node label key to match. Ignored if `worker.affinity` is set. | `""` | +| `worker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `worker.nodeAffinityPreset.values` | Node label values to match. Ignored if `worker.affinity` is set. | `[]` | +| `worker.nodeSelector` | Node labels for Airflow worker pods assignment | `{}` | +| `worker.podAffinityPreset` | Pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `worker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `worker.tolerations` | Tolerations for Airflow worker pods assignment | `[]` | +| `worker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `worker.priorityClassName` | Priority Class Name | `""` | +| `worker.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `worker.terminationGracePeriodSeconds` | Seconds Airflow worker pod needs to terminate gracefully | `""` | +| `worker.podManagementPolicy` | Pod management policy for the worker statefulset | `OrderedReady` | +| `worker.updateStrategy.type` | Airflow worker statefulset strategy type | `RollingUpdate` | +| `worker.updateStrategy.rollingUpdate` | Airflow worker statefulset rolling update configuration parameters | `{}` | +| `worker.sidecars` | Add additional sidecar containers to the Airflow worker pods | `[]` | +| `worker.initContainers` | Add additional init containers to the Airflow worker pods | `[]` | +| `worker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow worker pods | `[]` | +| `worker.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow worker pods | `[]` | +| `worker.extraVolumeClaimTemplates` | Optionally specify extra list of volumesClaimTemplates for the Airflow worker statefulset | `[]` | +| `worker.podTemplate` | Template to replace the default one to be used when `executor=KubernetesExecutor` to create Airflow worker pods | `{}` | +| `worker.pdb.create` | Deploy a pdb object for the Airflow worker pods | `true` | +| `worker.pdb.minAvailable` | Minimum number/percentage of available Airflow worker replicas | `""` | +| `worker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow worker replicas | `""` | +| `worker.autoscaling.enabled` | DEPRECATED: use worker.autoscaling.hpa.enabled instead | `false` | +| `worker.autoscaling.minReplicas` | DEPRECATED: use worker.autoscaling.hpa.minReplicas instead | `""` | +| `worker.autoscaling.maxReplicas` | DEPRECATED: use worker.autoscaling.hpa.maxReplicas instead | `""` | +| `worker.autoscaling.targetMemory` | DEPRECATED: use worker.autoscaling.hpa.targetMemory instead | `""` | +| `worker.autoscaling.targetCPU` | DEPRECATED: use worker.autoscaling.hpa.targetCPU instead | `""` | +| 
`worker.autoscaling.vpa.enabled` | Enable VPA for Airflow Worker | `false` | +| `worker.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `worker.autoscaling.vpa.controlledResources` | List of resources that the VPA can control. Defaults to cpu and memory | `[]` | +| `worker.autoscaling.vpa.maxAllowed` | VPA max allowed resources for the pod | `{}` | +| `worker.autoscaling.vpa.minAllowed` | VPA min allowed resources for the pod | `{}` | +| `worker.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `worker.autoscaling.hpa.enabled` | Enable HPA for Airflow Worker | `false` | +| `worker.autoscaling.hpa.minReplicas` | Minimum number of replicas | `1` | +| `worker.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `3` | +| `worker.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `80` | +| `worker.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `80` | +| `worker.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `worker.networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `worker.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `worker.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `worker.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `worker.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `worker.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Airflow "setup-db" K8s Job parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `setupDBJob.enabled` | Enable setting up the Airflow database using a K8s job (otherwise it's done by the Webserver on startup) | `true` | +| `setupDBJob.backoffLimit` | set backoff limit of the job | `10` | +| `setupDBJob.command` | Override default container command on "setup-db" job's containers | `[]` | +| `setupDBJob.args` | Override default container args on "setup-db" job's containers | `[]` | +| `setupDBJob.containerSecurityContext.enabled` | Enabled "setup-db" job's containers' Security Context | `true` | +| `setupDBJob.containerSecurityContext.seLinuxOptions` | Set SELinux options in "setup-db" job's containers | `{}` | +| `setupDBJob.containerSecurityContext.runAsUser` | Set runAsUser in "setup-db" job's containers' Security Context | `1001` | +| `setupDBJob.containerSecurityContext.runAsGroup` | Set runAsUser in "setup-db" job's containers' Security Context | `1001` | +| `setupDBJob.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "setup-db" job's containers' Security Context | `true` | +| `setupDBJob.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "setup-db" job's 
containers' Security Context | `true` | +| `setupDBJob.containerSecurityContext.privileged` | Set privileged in "setup-db" job's containers' Security Context | `false` | +| `setupDBJob.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "setup-db" job's containers' Security Context | `false` | +| `setupDBJob.containerSecurityContext.capabilities.add` | List of capabilities to be added in "setup-db" job's containers | `[]` | +| `setupDBJob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "setup-db" job's containers | `["ALL"]` | +| `setupDBJob.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "setup-db" job's containers | `RuntimeDefault` | +| `setupDBJob.podSecurityContext.enabled` | Enabled "setup-db" job's pods' Security Context | `true` | +| `setupDBJob.podSecurityContext.fsGroupChangePolicy` | Set fsGroupChangePolicy in "setup-db" job's pods' Security Context | `Always` | +| `setupDBJob.podSecurityContext.sysctls` | List of sysctls to allow in "setup-db" job's pods' Security Context | `[]` | +| `setupDBJob.podSecurityContext.supplementalGroups` | List of supplemental groups to add to "setup-db" job's pods' Security Context | `[]` | +| `setupDBJob.podSecurityContext.fsGroup` | Set fsGroup in "setup-db" job's pods' Security Context | `1001` | +| `setupDBJob.extraEnvVars` | Array containing extra env vars to configure the Airflow "setup-db" job's container | `[]` | +| `setupDBJob.extraEnvVarsCM` | ConfigMap containing extra env vars to configure the Airflow "setup-db" job's container | `""` | +| `setupDBJob.extraEnvVarsSecret` | Secret containing extra env vars to configure the Airflow "setup-db" job's container (in case of sensitive data) | `""` | +| `setupDBJob.resourcesPreset` | Set Airflow "setup-db" job's container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if setupDBJob.resources is set (setupDBJob.resources is recommended for production). | `small` | +| `setupDBJob.resources` | Set Airflow "setup-db" job's container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `setupDBJob.automountServiceAccountToken` | Mount Service Account token in Airflow "setup-db" job's pods | `false` | +| `setupDBJob.hostAliases` | Add deployment host aliases | `[]` | +| `setupDBJob.annotations` | Add annotations to the Airflow "setup-db" job | `{}` | +| `setupDBJob.podLabels` | Additional pod labels for Airflow "setup-db" job | `{}` | +| `setupDBJob.podAnnotations` | Additional pod annotations for Airflow "setup-db" job | `{}` | +| `setupDBJob.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `setupDBJob.affinity` | Affinity for Airflow setup-db pods assignment (evaluated as a template) | `{}` | +| `setupDBJob.nodeAffinityPreset.key` | Node label key to match. Ignored if `setupDBJob.affinity` is set. | `""` | +| `setupDBJob.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `setupDBJob.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `setupDBJob.nodeAffinityPreset.values` | Node label values to match. Ignored if `setupDBJob.affinity` is set. | `[]` | +| `setupDBJob.nodeSelector` | Node labels for Airflow setup-db pods assignment | `{}` | +| `setupDBJob.podAffinityPreset` | Pod affinity preset. Ignored if `setupDBJob.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `setupDBJob.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `setupDBJob.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `setupDBJob.tolerations` | Tolerations for Airflow setup-db pods assignment | `[]` | +| `setupDBJob.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `setupDBJob.priorityClassName` | Priority Class Name | `""` | +| `setupDBJob.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `setupDBJob.terminationGracePeriodSeconds` | Seconds Airflow setup-db pod needs to terminate gracefully | `""` | +| `setupDBJob.extraVolumes` | Optionally specify extra list of additional volumes for Airflow "setup-db" job's pods | `[]` | +| `setupDBJob.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow "setup-db" job's containers | `[]` | +| `setupDBJob.initContainers` | Add additional init containers to the Airflow "setup-db" job's pods | `[]` | + +### Airflow ldap parameters + +| Name | Description | Value | +| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.uri` | Server URI, eg. ldap://ldap_server:389 | `ldap://ldap_server:389` | +| `ldap.basedn` | Base of the search, eg. ou=example,o=org. | `dc=example,dc=org` | +| `ldap.searchAttribute` | if doing an indirect bind to ldap, this is the field that matches the username when searching for the account to bind to | `cn` | +| `ldap.binddn` | DN of the account used to search in the LDAP server. | `cn=admin,dc=example,dc=org` | +| `ldap.bindpw` | Bind Password | `""` | +| `ldap.existingSecret` | Name of an existing secret containing the LDAP bind password | `""` | +| `ldap.userRegistration` | Set to True to enable user self registration | `True` | +| `ldap.userRegistrationRole` | Set role name to be assign when a user registers himself. This role must already exist. 
Mandatory when using ldap.userRegistration | `Public` | +| `ldap.rolesMapping` | mapping from LDAP DN to a list of roles | `{ "cn=All,ou=Groups,dc=example,dc=org": ["User"], "cn=Admins,ou=Groups,dc=example,dc=org": ["Admin"], }` | +| `ldap.rolesSyncAtLogin` | replace ALL the user's roles each login, or only on registration | `True` | +| `ldap.tls.enabled` | Enabled TLS/SSL for LDAP, you must include the CA file. | `false` | +| `ldap.tls.allowSelfSigned` | Allow to use self signed certificates | `true` | +| `ldap.tls.certificatesSecret` | Name of the existing secret containing the certificate CA file that will be used by ldap client | `""` | +| `ldap.tls.certificatesMountPath` | Where LDAP certifcates are mounted. | `/opt/drycc/airflow/conf/certs` | +| `ldap.tls.CAFilename` | LDAP CA cert filename | `""` | + +### Traffic Exposure Parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Airflow service type | `ClusterIP` | +| `service.ports.http` | Airflow service HTTP port | `8080` | +| `service.nodePorts.http` | Node port for HTTP | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | Airflow service Cluster IP | `""` | +| `service.loadBalancerIP` | Airflow service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Airflow service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Airflow service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Airflow service | `{}` | +| `service.extraPorts` | Extra port to expose on Airflow service | `[]` | +| `ingress.enabled` | Enable ingress record generation for Airflow 
| `false` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.hostname` | Default host for the ingress record | `airflow.local` | +| `ingress.path` | Default path for the ingress record | `/` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Airflow pods | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Create Role and RoleBinding | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | + +### StatsD metrics parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------- | +| `metrics.enabled` | Enable a StatsD exporter that collects StatsD metrics from Airflow components and expose them as Prometheus metrics | `false` | +| `metrics.image.registry` | StatsD exporter image registry | `REGISTRY_NAME` | +| `metrics.image.repository` | StatsD exporter image repository | `REPOSITORY_NAME/statsd-exporter` | +| `metrics.image.digest` | StatsD exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | StatsD exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | StatsD exporter image pull secrets | `[]` | +| `metrics.configuration` | Specify content for StatsD exporter's mappings.yml | `""` | +| `metrics.existingConfigmap` | Name of an existing config map containing the StatsD exporter's mappings.yml | `""` | +| `metrics.containerPorts.ingest` | StatsD exporter ingest container port (used for the metrics ingestion from Airflow components) | `9125` | +| `metrics.containerPorts.metrics` | StatsD exporter metrics container port (used to expose Prometheus metrics) | `9102` | +| `metrics.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). | `nano` | +| `metrics.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `metrics.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `metrics.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `metrics.podSecurityContext.fsGroup` | Set StatsD exporter pod's Security Context fsGroup | `1001` | +| `metrics.containerSecurityContext.enabled` | Enable StatsD exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.containerSecurityContext.runAsUser` | Set StatsD exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsGroup` | Set StatsD 
exporter containers' Security Context runAsGroup | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set StatsD exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.containerSecurityContext.privileged` | Set StatsD exporter containers' Security Context privileged | `false` | +| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set StatsD exporter containers' Security Context allowPrivilegeEscalation | `false` | +| `metrics.containerSecurityContext.readOnlyRootFilesystem` | Set StatsD exporter containers' Security Context readOnlyRootFilesystem | `true` | +| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.containerSecurityContext.seccompProfile.type` | Set containers' Security Context seccomp profile | `RuntimeDefault` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on StatsD exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on StatsD exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable 
startupProbe on StatsD exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.lifecycleHooks` | for the StatsD exporter containers' to automate configuration before or after startup | `{}` | +| `metrics.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `metrics.hostAliases` | StatsD exporter pods host aliases | `[]` | +| `metrics.podLabels` | Extra labels for StatsD exporter pods | `{}` | +| `metrics.podAnnotations` | Extra annotations for StatsD exporter pods | `{}` | +| `metrics.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `metrics.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.affinity` is set. | `""` | +| `metrics.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.affinity` is set. 
| `[]` | +| `metrics.affinity` | Affinity for StatsD exporter pods assignment | `{}` | +| `metrics.nodeSelector` | Node labels for StatsD exporter pods assignment | `{}` | +| `metrics.priorityClassName` | StatsD exporter pods' priorityClassName | `""` | +| `metrics.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `metrics.schedulerName` | Name of the k8s scheduler (other than default) for StatsD exporter | `""` | +| `metrics.terminationGracePeriodSeconds` | Seconds StatsD exporter pod needs to terminate gracefully | `""` | +| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the StatsD exporter pods | `[]` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the StatsD exporter containers | `[]` | +| `metrics.service.ports.ingest` | StatsD exporter ingest service port (used for the metrics ingestion from Airflow components) | `9125` | +| `metrics.service.ports.metrics` | StatsD exporter metrics service port (used to expose Prometheus metrics) | `9102` | +| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.service.annotations` | Annotations for the StatsD metrics service | `{}` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can 
be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `metrics.networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `metrics.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `metrics.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `metrics.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `metrics.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `metrics.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Airflow database parameters + +| Name | Description | Value | +| ------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | +| `postgresql.enabled` | Switch to enable or disable the PostgreSQL helm chart | `true` | +| `postgresql.auth.enablePostgresUser` | Assign a password to the "postgres" admin user. 
Otherwise, remote access will be blocked for this user | `true` | +| `postgresql.auth.username` | Name for a custom user to create | `bn_airflow` | +| `postgresql.auth.password` | Password for the custom user to create | `""` | +| `postgresql.auth.database` | Name for a custom database to create | `bitnami_airflow` | +| `postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials | `""` | +| `postgresql.architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` | +| `postgresql.primary.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). | `nano` | +| `postgresql.primary.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `externalDatabase.host` | Database host (ignored if externalDatabase.sqlConnection is set) | `localhost` | +| `externalDatabase.port` | Database port number (ignored if externalDatabase.sqlConnection is set) | `5432` | +| `externalDatabase.user` | Non-root username for Airflow (ignored if externalDatabase.sqlConnection is set) | `bn_airflow` | +| `externalDatabase.password` | Password for the non-root username for Airflow (ignored if externalDatabase.sqlConnection or externalDatabase.existingSecret are set) | `""` | +| `externalDatabase.database` | Airflow database name (ignored if externalDatabase.sqlConnection is set) | `bitnami_airflow` | +| `externalDatabase.sqlConnection` | SQL connection string | `""` | +| `externalDatabase.existingSecret` | Name of an existing secret resource containing the database credentials | `""` | +| `externalDatabase.existingSecretPasswordKey` | Name of an existing secret key containing the database credentials (ignored if externalDatabase.existingSecretSqlConnectionKey is set) | `""` 
| +| `externalDatabase.existingSecretSqlConnectionKey` | Name of an existing secret key containing the SQL connection string | `""` | +| `redis.enabled` | Switch to enable or disable the Redis® helm | `true` | +| `redis.auth.enabled` | Enable password authentication | `true` | +| `redis.auth.password` | Redis® password | `""` | +| `redis.auth.existingSecret` | The name of an existing secret with Redis® credentials | `""` | +| `redis.architecture` | Redis® architecture. Allowed values: `standalone` or `replication` | `standalone` | +| `redis.master.service.ports.redis` | Redis® port | `6379` | +| `redis.master.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). | `nano` | +| `redis.master.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `externalRedis.host` | Redis® host | `localhost` | +| `externalRedis.port` | Redis® port number | `6379` | +| `externalRedis.username` | Redis® username | `""` | +| `externalRedis.password` | Redis® password | `""` | +| `externalRedis.existingSecret` | Name of an existing secret resource containing the Redis&trade credentials | `""` | +| `externalRedis.existingSecretPasswordKey` | Name of an existing secret key containing the Redis&trade credentials | `""` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.username=my-user \ + --set auth.password=my-passsword \ + --set auth.fernetKey=my-fernet-key \ + --set auth.secretKey=my-secret-key \ + oci://REGISTRY_NAME/REPOSITORY_NAME/airflow +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. 
For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the credentials to access the Airflow web UI. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/airflow +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/airflow/values.yaml) + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 24.0.0 + +This major updates the Redis® subchart to its newest major, 21.0.0, which updates Redis® from 7.4 to 8.0. [Here](https://redis.io/docs/latest/operate/oss_and_stack/install/upgrade/cluster/) you can find more information about the changes introduced in that version. No major issues are expected during the upgrade. + +### To 23.0.0 + +This major release adds support for Airflow `3.x.y` series. Additionally, previous Airflow `2.x.y` series can be deployed by setting the corresponding image parameters. 
The chart logic will detect which image version you are using, and it will generate the required Airflow configuration and Kubernetes objects. + +We recommend following the next procedure in order to upgrade from `22.x.y` chart version to the `23.x.y` series, and also upgrade to Airflow `3.y.z` series: + +- Upgrade your release (maintaining Airflow `2.x.y` series): + +```console +helm upgrade airflow oci://REGISTRY_NAME/REPOSITORY_NAME/airflow --set image.tag=2 +``` + +- Follow the recommended steps for the database backup and the DAGs files verification available at the [official "upgrading to Airflow 3" guide](https://airflow.apache.org/docs/apache-airflow/stable/installation/upgrading_to_airflow3.html). + +- Upgrade your release now using the default Airflow `3.x.y` series: + +```console +helm upgrade airflow oci://REGISTRY_NAME/REPOSITORY_NAME/airflow +``` + +### To 22.4.0 + +This version introduces image verification for security purposes. To disable it, set `global.security.allowInsecureImages` to `true`. More details at [GitHub issue](https://github.com/bitnami/charts/issues/30850). + +### To 22.2.0 + +This minor version no longer expects custom Airflow configuration (set via the `configuration` parameter) to be provided as a string. Instead, it expects a dictionary with the configuration sections/keys/values. Find more info in the [section](#airflow-configuration-file) above. + +### To 22.0.0 + +This major version replaces exposing Prometheus metrics using the [Airflow prometheus exporter](https://github.com/PBWebMedia/airflow-prometheus-exporter), that exposes metrics based on the data retrieved from the database, by configuring Airflow components to send StatsD metrics to the [StatsD exporter](https://github.com/prometheus/statsd_exporter) that transforms them into Prometheus metrics. 
Find more information about this approach in the [Apache Airflow official documentation](https://airflow.apache.org/docs/apache-airflow/stable/administration-and-deployment/logging-monitoring/metrics.html#setup-statsd). + +No upgrades issues are expected when upgrading from `21.x.x` but existing dashboards and alerts based on the previous metrics should be adapted to the new ones. + +### To 21.0.0 + +This major version uses a single container image ([`bitnami/airflow`](https://github.com/bitnami/containers/tree/main/bitnami/airflow) by default) to run every Airflow component (Webserver, Scheduler and Worker) so `bitnami/airflow-scheduler` and `bitnami/airflow-worker` images are no longer necessary. Also, operations to load custom DAGs and plugins via init containers also use this same image so `bitnami/git` and `bitnami/os-shell` are no longer necessary either. These changes implies several simplifications in the chart values: + +- New `image.*` parameters are introduced to configure the container image used to run the Airflow components. +- `web.image.*`, `scheduler.image.*` and `worker.image.*` parameters are removed. +- `dags.image.*` and `git.image.*` parameters are removed. + +Some other simplifications are introduced around adding custom DAGs and plugins: + +- `dags.*` and `git.dags.*` parameters are merged into a single `dags.*` parameter. +- `git.plugins.*` parameter are renamed to `plugins.*`. +- `git.clone.*` and `git.sync.` parameters are now available under `defaultInitContainers.loadDAGsPlugins.*` and `defaultSidecars.syncDAGsPlugins.*`, respectively. + +No upgrades issues are expected when upgrading from `20.x.x` if DAGs and plugins related parameters are properly adapted as described above. + +### To 20.0.0 + +This major updates the PostgreSQL subchart to its newest major, 16.0.0, which uses PostgreSQL 17.x. Follow the [official instructions](https://www.postgresql.org/docs/17/upgrading.html) to upgrade to 17.x. 
+ +### To 19.0.0 + +This major updates the Redis® subchart to its newest major, 20.0.0. [Here](https://github.com/bitnami/charts/tree/main/bitnami/redis#to-2000) you can find more information about the changes introduced in that version. + +### To 18.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + +### To 17.0.0 + +This major release bumps the PostgreSQL chart version to [14.x.x](https://github.com/bitnami/charts/pull/22750); no major issues are expected during the upgrade. + +### To 16.0.0 + +This major updates the PostgreSQL subchart to its newest major, 13.0.0. [Here](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#to-1300) you can find more information about the changes introduced in that version. + +### To 15.0.0 + +This major updates the Redis® subchart to its newest major, 18.0.0. [Here](https://github.com/bitnami/charts/tree/main/bitnami/redis#to-1800) you can find more information about the changes introduced in that version. + +NOTE: Due to an error in our release process, Redis®' chart versions higher or equal than 17.15.4 already use Redis® 7.2 by default. + +### To 14.0.0 + +This major updates the PostgreSQL subchart to its newest major, 12.0.0. [Here](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#to-1200) you can find more information about the changes introduced in that version. 
+ +### To 13.0.0 + +This major update the Redis® subchart to its newest major, 17.0.0, which updates Redis® from its version 6.2 to the latest 7.0. + +### To 12.0.0 + +This major release renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Bitnami charts repository. Additionally updates the PostgreSQL & Redis subcharts to their newest major 11.x.x and 16.x.x, respectively, which contain similar changes. + +- *auth.forcePassword* parameter is deprecated. The new version uses Helm's lookup functionalities and forcing passwords isn't required anymore. +- *config* and *configurationConfigMap* have been renamed to *configuration* and *existingConfigmap*, respectively. +- *dags.configMap* and *web.configMap* have been renamed to *dags.existingConfigmap* and *web.existingConfigmap*, respectively. +- *web.containerPort* and *worker.port* have been regrouped under the *web.containerPorts* and *worker.containerPorts* maps, respectively. +- *web.podDisruptionBudget*, *scheduler.podDisruptionBudget* and *worker.podDisruptionBudget* maps have been renamed to *web.pdb*, *scheduler.pdb* and *worker.pdb*, respectively. +- *worker.autoscaling.replicas.min*, *worker.autoscaling.replicas.max*, *worker.autoscaling.targets.cpu* and *worker.autoscaling.targets.memory* have been renamed to *worker.autoscaling.minReplicas*, *worker.autoscaling.maxReplicas*, *worker.autoscaling.targetCPU* and *.Values.worker.autoscaling.targetMemory*, respectively. +- *service.port* and *service.httpsPort* have been regrouped under the *service.ports* map. +- *ingress* map is completely redefined. +- *metrics.service.port* has been regrouped under the *metrics.service.ports* map. +- Support for Network Policies is dropped and it'll be properly added in the future. +- The secret keys *airflow-fernetKey* and *airflow-secretKey* were renamed to *airflow-fernet-key* and *airflow-secret-key*, respectively. 
+ +#### How to upgrade to version 12.0.0 + +To upgrade to *12.0.0* from *11.x*, it should be done reusing the PVC(s) used to hold the data on your previous release. To do so, follow the instructions below (the following example assumes that the release name is *airflow* and the release namespace *default*): + +> NOTE: Please, create a backup of your database before running any of those actions. + +1. Obtain the credentials and the names of the PVCs used to hold the data on your current release: + +```console + export AIRFLOW_PASSWORD=$(kubectl get secret --namespace default airflow -o jsonpath="{.data.airflow-password}" | base64 --decode) + export AIRFLOW_FERNET_KEY=$(kubectl get secret --namespace default airflow -o jsonpath="{.data.airflow-fernetKey}" | base64 --decode) + export AIRFLOW_SECRET_KEY=$(kubectl get secret --namespace default airflow -o jsonpath="{.data.airflow-secretKey}" | base64 --decode) + export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace default airflow-postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) + export REDIS_PASSWORD=$(kubectl get secret --namespace default airflow-redis -o jsonpath="{.data.redis-password}" | base64 --decode) + export POSTGRESQL_PVC=$(kubectl get pvc -l app.kubernetes.io/instance=airflow,app.kubernetes.io/name=postgresql,role=primary -o jsonpath="{.items[0].metadata.name}") +``` + +1. Delete the Airflow worker & PostgreSQL statefulset (notice the option *--cascade=false*) and secrets: + +```console + kubectl delete statefulsets.apps --cascade=false airflow-postgresql + kubectl delete statefulsets.apps --cascade=false airflow-worker + kubectl delete secret postgresql --namespace default + kubectl delete secret airflow --namespace default +``` + +1. 
Upgrade your release using the same PostgreSQL version: + +```console + CURRENT_PG_VERSION=$(kubectl exec airflow-postgresql-0 -- bash -c 'echo $BITNAMI_IMAGE_VERSION') + helm upgrade airflow bitnami/airflow \ + --set loadExamples=true \ + --set web.baseUrl=http://127.0.0.1:8080 \ + --set auth.password=$AIRFLOW_PASSWORD \ + --set auth.fernetKey=$AIRFLOW_FERNET_KEY \ + --set auth.secretKey=$AIRFLOW_SECRET_KEY \ + --set postgresql.image.tag=$CURRENT_VERSION \ + --set postgresql.auth.password=$POSTGRESQL_PASSWORD \ + --set postgresql.persistence.existingClaim=$POSTGRESQL_PVC \ + --set redis.password=$REDIS_PASSWORD \ + --set redis.cluster.enabled=true +``` + +1. Delete the existing Airflow worker & PostgreSQL pods and the new statefulset will create a new one: + +```console + kubectl delete pod airflow-postgresql-0 + kubectl delete pod airflow-worker-0 +``` + +### To 11.0.0 + +This major update the Redis® subchart to its newest major, 15.0.0. [Here](https://github.com/bitnami/charts/tree/main/bitnami/redis#to-1500) you can find more info about the specific changes. + +### To 10.0.0 + +This major updates the Redis® subchart to it newest major, 14.0.0, which contains breaking changes. For more information on this subchart's major and the steps needed to migrate your data from your previous release, please refer to [Redis® upgrade notes.](https://github.com/bitnami/charts/tree/main/bitnami/redis#to-1400). + +### To 7.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. 
The following changes were introduced in this version: + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Move dependency information from the *requirements.yaml* to the *Chart.yaml* +- After running *helm dependency update*, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock* +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Chart. +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - The image objects have been moved to its corresponding component object, e.g: *workerImage* now is located at *worker.image*. + - The prefix *airflow* has been removed. Therefore, parameters prefixed with *airflow* are now at root level, e.g. *airflow.loadExamples* now is *loadExamples* or *airflow.worker.resources* now is *worker.resources*. + - Parameters related to the *git* features has completely been refactored: + - They have been regrouped under the *git* map. + - *airflow.cloneDagsFromGit* no longer exists, instead you must use *git.dags* and *git.dags.repositories* has been introduced that will add support for multiple repositories. + - *airflow.clonePluginsFromGit* no longer exists, instead you must use *git.plugins*. *airflow.clonePluginsFromGit.repository*, *airflow.clonePluginsFromGit.branch* and *airflow.clonePluginsFromGit.path* have been removed in favour of *git.dags.repositories*. + - Liveness and readiness probe have been separated by components *airflow.livenessProbe.** and *airflow.readinessProbe* have been removed in favour of *web.livenessProbe*, *worker.livenessProbe*, *web.readinessProbe* and *worker.readinessProbe*. 
+ - *airflow.baseUrl* has been moved to *web.baseUrl*. + - Security context has been migrated to the bitnami standard way so that *securityContext* has been divided into *podSecurityContext* that will define the **fsGroup** for all the containers in the pod and *containerSecurityContext* that will define the user id that will run the main containers. + - *./files/dags/*.py* will not be include in the deployment any more. +- Additionally updates the PostgreSQL & Redis subcharts to their newest major 10.x.x and 11.x.x, respectively, which contain similar changes. + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version does not support Helm v2 anymore. +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3. + +#### Useful links + +- [Bitnami Tutorial](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-resolve-helm2-helm3-post-migration-issues-index.html) +- [Helm docs](https://helm.sh/docs/topics/v2_v3_migration) +- [Helm Blog](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3) + +#### How to upgrade to version 7.0.0 + +To upgrade to *7.0.0* from *6.x*, it should be done reusing the PVC(s) used to hold the data on your previous release. To do so, follow the instructions below (the following example assumes that the release name is *airflow* and the release namespace *default*): + +> NOTE: Please, create a backup of your database before running any of those actions. + +1. 
Obtain the credentials and the names of the PVCs used to hold the data on your current release: + +```console + export AIRFLOW_PASSWORD=$(kubectl get secret --namespace default airflow -o jsonpath="{.data.airflow-password}" | base64 --decode) + export AIRFLOW_FERNET_KEY=$(kubectl get secret --namespace default airflow -o jsonpath="{.data.airflow-fernetKey}" | base64 --decode) + export AIRFLOW_SECRET_KEY=$(kubectl get secret --namespace default airflow -o jsonpath="{.data.airflow-secretKey}" | base64 --decode) + export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace default airflow-postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) + export REDIS_PASSWORD=$(kubectl get secret --namespace default airflow-redis -o jsonpath="{.data.redis-password}" | base64 --decode) + export POSTGRESQL_PVC=$(kubectl get pvc -l app.kubernetes.io/instance=airflow,app.kubernetes.io/name=postgresql,role=primary -o jsonpath="{.items[0].metadata.name}") +``` + +1. Delete the Airflow worker & PostgreSQL statefulset (notice the option *--cascade=false*): + +```console + kubectl delete statefulsets.apps --cascade=false airflow-postgresql + kubectl delete statefulsets.apps --cascade=false airflow-worker +``` + +1. Upgrade your release: + +> NOTE: Please remember to migrate all the values to its new path following the above notes, e.g: `airflow.loadExamples` -> `loadExamples` or `airflow.baseUrl=http://127.0.0.1:8080` -> `web.baseUrl=http://127.0.0.1:8080`. + +```console + helm upgrade airflow bitnami/airflow \ + --set loadExamples=true \ + --set web.baseUrl=http://127.0.0.1:8080 \ + --set auth.password=$AIRFLOW_PASSWORD \ + --set auth.fernetKey=$AIRFLOW_FERNET_KEY \ + --set auth.secretKey=$AIRFLOW_SECRET_KEY \ + --set postgresql.postgresqlPassword=$POSTGRESQL_PASSWORD \ + --set postgresql.persistence.existingClaim=$POSTGRESQL_PVC \ + --set redis.password=$REDIS_PASSWORD \ + --set redis.cluster.enabled=true +``` + +1. 
Delete the existing Airflow worker & PostgreSQL pods and the new statefulset will create a new one: + +```console + kubectl delete pod airflow-postgresql-0 + kubectl delete pod airflow-worker-0 +``` + +## License + +Copyright © 2025 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/addons/airflow/3/chart/airflow-3/templates/NOTES.txt b/addons/airflow/3/chart/airflow-3/templates/NOTES.txt new file mode 100644 index 00000000..cbd59f32 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/NOTES.txt @@ -0,0 +1,132 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +Did you know there are enterprise versions of the Bitnami catalog? For enhanced secure software supply chain features, unlimited pulls from Docker, LTS support, or application customization, see Bitnami Premium or Tanzu Application Catalog. See https://www.arrow.com/globalecs/na/vendors/bitnami for more information. + +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $airflowWebServiceName := include "airflow.web.fullname" . }} +{{- $airflowSecretName := include "airflow.secretName" . }} +{{- $baseUrl := (include "airflow.baseUrl" .) }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} + +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ $releaseNamespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ $releaseNamespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/drycc/scripts/airflow/entrypoint.sh /opt/drycc/scripts/airflow/run.sh + +{{- else }} + +{{- if and (contains "127.0.0.1" $baseUrl) (not (eq "ClusterIP" .Values.service.type)) }} +############################################################################### +### ERROR: You did not provide an external URL in your 'helm install' call ### +############################################################################### + +This deployment will be incomplete until you configure Airflow with a resolvable +host. To configure Airflow with the URL of your service: + +1. Get the Airflow URL by running: + + {{- if eq "NodePort" .Values.service.type }} + + export AIRFLOW_HOST=$(kubectl get nodes --namespace {{ $releaseNamespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export AIRFLOW_PORT=$(kubectl get --namespace {{ $releaseNamespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ $airflowWebServiceName }}) + + {{- else if eq "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ Watch the status with: 'kubectl get svc --namespace {{ $releaseNamespace }} -w {{ $airflowWebServiceName }}' + + export AIRFLOW_HOST=$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $airflowWebServiceName }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + export AIRFLOW_PORT=80 + + {{- end }} + +2. Complete your Airflow deployment by running: + + {{ include "common.utils.secret.getvalue" (dict "secret" $airflowSecretName "field" "airflow-password" "context" $) }} + {{ include "common.utils.secret.getvalue" (dict "secret" $airflowSecretName "field" "airflow-fernet-key" "context" $) }} + {{ include "common.utils.secret.getvalue" (dict "secret" $airflowSecretName "field" "airflow-secret-key" "context" $) }} + helm upgrade --namespace {{ $releaseNamespace }} {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/{{ .Chart.Name }} \ + --set service.type={{ .Values.service.type }} \ + --set web.baseUrl=http://$AIRFLOW_HOST:$AIRFLOW_PORT \ + --set auth.password=$AIRFLOW_PASSWORD \ + --set auth.fernetKey=$AIRFLOW_FERNETKEY \ + --set auth.secretKey=$AIRFLOW_SECRETKEY + +{{- else }} + +Airflow can be accessed via port {{ .Values.service.ports.http }} on the following DNS name from within your cluster: + + {{ printf "%s.%s.svc.%s" $airflowWebServiceName $releaseNamespace $clusterDomain }} + +To connect to Airflow from outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} + +1. Get the Airflow URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + echo "Airflow URL: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +{{- else if eq .Values.service.type "ClusterIP" }} + +1. 
Create a port-forward to the service: + + kubectl port-forward --namespace {{ $releaseNamespace }} svc/{{ $airflowWebServiceName }} {{ .Values.service.ports.http }}:{{ .Values.service.ports.http }} & + echo "Airflow URL: http{{ if .Values.web.tls }}s{{ end }}://127.0.0.1:{{ .Values.service.ports.http }}" + +{{- else if eq .Values.service.type "NodePort" }} + +1. Obtain the NodePort IP and port: + + export NODE_IP=$(kubectl get nodes --namespace {{ $releaseNamespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $airflowWebServiceName }} -o jsonpath="{.spec.ports[0].nodePort}") + echo "Airflow URL: http{{ if .Values.web.tls }}s{{ end }}://${NODE_IP}:$NODE_PORT" + +{{- else if eq .Values.service.type "LoadBalancer" }} + +1. Obtain the LoadBalancer IP: + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ $airflowWebServiceName }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ $airflowWebServiceName }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "Airflow URL: http{{ if .Values.web.tls }}s{{ end }}://${SERVICE_IP}:{{ .Values.service.ports.http }}" + +{{- end }} + +2. Open a browser and access Airflow using the obtained URL. + +3. Get your Airflow login credentials by running: + + {{ include "common.utils.secret.getvalue" (dict "secret" $airflowSecretName "field" "airflow-password" "context" $) }} + echo User: {{ .Values.auth.username }} + echo Password: $AIRFLOW_PASSWORD + +{{- end }} +{{- end }} + +{{ include "airflow.validateValues" . 
}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.resources" (dict "sections" (list "scheduler" "web" "worker" "defaultSidecars.syncDAGsPlugins" "defaultInitContainers.loadDAGsPlugins" "metrics") "context" $) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.metrics.image) "context" $) }} +{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.metrics.image) "context" $) }} diff --git a/addons/airflow/3/chart/airflow-3/templates/_helpers.tpl b/addons/airflow/3/chart/airflow-3/templates/_helpers.tpl new file mode 100644 index 00000000..ca446a26 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/_helpers.tpl @@ -0,0 +1,638 @@ +{{/* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper Airflow Web server fullname +*/}} +{{- define "airflow.web.fullname" -}} +{{- printf "%s-web" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Airflow Scheduler fullname +*/}} +{{- define "airflow.scheduler.fullname" -}} +{{- printf "%s-scheduler" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Airflow Dag Processor fullname +*/}} +{{- define "airflow.dagProcessor.fullname" -}} +{{- printf "%s-dag-processor" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Airflow Triggerer fullname +*/}} +{{- define "airflow.triggerer.fullname" -}} +{{- printf "%s-triggerer" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Airflow Worker fullname +*/}} +{{- define "airflow.worker.fullname" -}} +{{- printf "%s-worker" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Airflow metrics fullname +*/}} +{{- define "airflow.metrics.fullname" -}} +{{- printf "%s-statsd-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Airflow scheduler service name +*/}} +{{- define "airflow.scheduler.serviceName" -}} +{{- printf "%s-hl" (include "airflow.scheduler.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Airflow image name +*/}} +{{- define "airflow.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Airflow Metrics image name +*/}} +{{- define "airflow.metrics.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "airflow.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Create a default fully qualified postgresql name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "airflow.postgresql.fullname" -}} +{{- include "common.names.dependency.fullname" (dict "chartName" "postgresql" "chartValues" .Values.postgresql "context" $) -}} +{{- end -}} + +{{/* +Get the Redis® fullname +*/}} +{{- define "airflow.redis.fullname" -}} +{{- include "common.names.dependency.fullname" (dict "chartName" "redis" "chartValues" .Values.redis "context" $) -}} +{{- end -}} + +{{/* +Create a default fully qualified redis name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "airflow.redis.host" -}} +{{- if .Values.redis.enabled -}} + {{- printf "%s-master" (include "airflow.redis.fullname" .) -}} +{{- else -}} + {{- printf "%s" (tpl .Values.externalRedis.host $) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the Redis® port +*/}} +{{- define "airflow.redis.port" -}} +{{- if .Values.redis.enabled -}} + {{- print .Values.redis.master.service.ports.redis -}} +{{- else -}} + {{- print .Values.externalRedis.port -}} +{{- end -}} +{{- end -}} + +{{/* +Get the Redis® credentials secret. +*/}} +{{- define "airflow.redis.secretName" -}} +{{- if .Values.redis.enabled -}} + {{- if .Values.redis.auth.existingSecret -}} + {{- print (tpl .Values.redis.auth.existingSecret .) -}} + {{- else -}} + {{- print (include "airflow.redis.fullname" .) -}} + {{- end -}} +{{- else if .Values.externalRedis.existingSecret -}} + {{- print (tpl .Values.externalRedis.existingSecret .) -}} +{{- else -}} + {{- printf "%s-externalredis" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the Postgresql credentials secret. +*/}} +{{- define "airflow.database.secretName" -}} +{{- if .Values.postgresql.enabled -}} + {{- $existingSecret := coalesce (((.Values.global.postgresql).auth).existingSecret) .Values.postgresql.auth.existingSecret -}} + {{- if $existingSecret -}} + {{- print (tpl $existingSecret .) -}} + {{- else -}} + {{- print (include "airflow.postgresql.fullname" .) -}} + {{- end -}} +{{- else if .Values.externalDatabase.existingSecret -}} + {{- print (tpl .Values.externalDatabase.existingSecret .) -}} +{{- else -}} + {{- printf "%s-externaldb" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the secret name +*/}} +{{- define "airflow.secretName" -}} +{{- if .Values.auth.existingSecret -}} + {{- print (tpl .Values.auth.existingSecret .) -}} +{{- else -}} + {{- print (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Get the configmap name +*/}} +{{- define "airflow.configMapName" -}} +{{- if .Values.existingConfigmap -}} + {{- print (tpl .Values.existingConfigmap .) -}} +{{- else -}} + {{- print (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configmap name for Airflow Webserver +*/}} +{{- define "airflow.web.configMapName" -}} +{{- if .Values.web.existingConfigmap -}} + {{- print (tpl .Values.web.existingConfigmap .) -}} +{{- else -}} + {{- print (include "airflow.web.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configmap name for StatsD exporter +*/}} +{{- define "airflow.metrics.configMapName" -}} +{{- if .Values.metrics.existingConfigmap -}} + {{- print (tpl .Values.metrics.existingConfigmap .) -}} +{{- else -}} + {{- print (include "airflow.metrics.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the TLS certificates for Airflow webserver +*/}} +{{- define "airflow.web.tls.secretName" -}} +{{- if or .Values.web.tls.autoGenerated.enabled (and (not (empty .Values.web.tls.cert)) (not (empty .Values.web.tls.key))) -}} + {{- printf "%s-crt" (include "airflow.web.fullname" .) -}} +{{- else -}} + {{- required "An existing secret name must be provided with TLS certs for Airflow webserver if cert and key are not provided!" (tpl .Values.web.tls.existingSecret .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the LDAP credentials secret. +*/}} +{{- define "airflow.ldap.secretName" -}} +{{- if .Values.ldap.existingSecret -}} + {{- print (tpl .Values.ldap.existingSecret .) -}} +{{- else -}} + {{- printf "%s-ldap" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret name containing the SSH keys for loading DAGs Git repositories +*/}} +{{- define "airflow.dags.ssh.secretName" -}} +{{- if .Values.dags.existingSshKeySecret -}} + {{- print (tpl .Values.dags.existingSshKeySecret .) 
-}} +{{- else -}} + {{- printf "%s-ssh" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret name containing the SSH keys for loading plugins Git repositories +*/}} +{{- define "airflow.plugins.ssh.secretName" -}} +{{- if .Values.plugins.existingSshKeySecret -}} + {{- print (tpl .Values.plugins.existingSshKeySecret .) -}} +{{- else -}} + {{- printf "%s-ssh" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "airflow.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a SQL connection string is used to connect to the database +*/}} +{{- define "airflow.database.useSqlConnection" -}} +{{- if and (not .Values.postgresql.enabled) (or .Values.externalDatabase.sqlConnection (and .Values.externalDatabase.existingSecret .Values.externalDatabase.existingSecretSqlConnectionKey)) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "airflow.database.host" -}} +{{- if eq .Values.postgresql.architecture "replication" }} +{{- (ternary (include "airflow.postgresql.fullname" .) (tpl .Values.externalDatabase.host $) .Values.postgresql.enabled | printf "%s-primary") | quote -}} +{{- else -}} +{{- ternary (include "airflow.postgresql.fullname" .) 
(tpl .Values.externalDatabase.host $) .Values.postgresql.enabled | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "airflow.database.user" -}} +{{- if .Values.postgresql.enabled }} + {{- if .Values.global.postgresql }} + {{- if .Values.global.postgresql.auth }} + {{- coalesce .Values.global.postgresql.auth.username .Values.postgresql.auth.username | quote -}} + {{- else -}} + {{- .Values.postgresql.auth.username | quote -}} + {{- end -}} + {{- else -}} + {{- .Values.postgresql.auth.username | quote -}} + {{- end -}} +{{- else -}} + {{- .Values.externalDatabase.user | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "airflow.database.name" -}} +{{- if .Values.postgresql.enabled }} + {{- if .Values.global.postgresql }} + {{- if .Values.global.postgresql.auth }} + {{- coalesce .Values.global.postgresql.auth.database .Values.postgresql.auth.database | quote -}} + {{- else -}} + {{- .Values.postgresql.auth.database | quote -}} + {{- end -}} + {{- else -}} + {{- .Values.postgresql.auth.database | quote -}} + {{- end -}} +{{- else -}} + {{- .Values.externalDatabase.database | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "airflow.database.secretKey" -}} +{{- if .Values.postgresql.enabled -}} + {{- print "password" -}} +{{- else -}} + {{- if and .Values.externalDatabase.existingSecret .Values.externalDatabase.existingSecretSqlConnectionKey -}} + {{- print (tpl .Values.externalDatabase.existingSecretSqlConnectionKey .) -}} + {{- else if .Values.externalDatabase.existingSecret -}} + {{- default "password" (tpl .Values.externalDatabase.existingSecretPasswordKey .) 
-}} + {{- else -}} + {{- ternary "password" "sql-connection" (empty .Values.externalDatabase.sqlConnection) -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "airflow.database.port" -}} +{{- ternary "5432" .Values.externalDatabase.port .Values.postgresql.enabled | quote -}} +{{- end -}} + +{{/* +Add environment variables to configure redis values +*/}} +{{- define "airflow.configure.redis" -}} +{{- if (not (or (eq .Values.executor "KubernetesExecutor" ) (eq .Values.executor "LocalKubernetesExecutor" ))) }} +- name: AIRFLOW_CELERY_BROKER_URL + valueFrom: + secretKeyRef: + name: {{ printf "%s-%s" .Release.Name "celerybroker" }} + key: celery-broker-url +{{- if .Values.celeryBrokerTransportOption }} +- name: AIRFLOW_CELERY_BROKER_TRANSPORT_OPTIONS + valueFrom: + secretKeyRef: + name: {{ printf "%s-%s" .Release.Name "celerybroker" }} + key: celery-broker-transport-option +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "airflow.redis.existingsecret.key" -}} +{{- if .Values.redis.enabled -}} + {{- printf "%s" "redis-password" -}} +{{- else -}} + {{- if .Values.externalRedis.existingSecret -}} + {{- if .Values.externalRedis.existingSecretPasswordKey -}} + {{- printf "%s" .Values.externalRedis.existingSecretPasswordKey -}} + {{- else -}} + {{- printf "%s" "redis-password" -}} + {{- end -}} + {{- else -}} + {{- printf "%s" "redis-password" -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure airflow common values +*/}} +{{- define "airflow.configure.airflow.common" -}} +- name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} +{{- if .Values.usePasswordFiles }} +- name: AIRFLOW__CORE__FERNET_KEY_CMD + value: "cat /opt/drycc/airflow/secrets/airflow-fernet-key" +- name: AIRFLOW__WEBSERVER__SECRET_KEY_CMD + value: "cat 
/opt/drycc/airflow/secrets/airflow-secret-key" +{{- if (include "airflow.isImageMajorVersion3" .) }} +- name: AIRFLOW__API_AUTH__JWT_SECRET_CMD + value: "cat /opt/drycc/airflow/secrets/airflow-jwt-secret-key" +{{- end }} +{{- else }} +- name: AIRFLOW__CORE__FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "airflow.secretName" . }} + key: airflow-fernet-key +- name: AIRFLOW__WEBSERVER__SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "airflow.secretName" . }} + key: airflow-secret-key +{{- if (include "airflow.isImageMajorVersion3" .) }} +- name: AIRFLOW__API_AUTH__JWT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "airflow.secretName" . }} + key: airflow-jwt-secret-key +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get the user defined LoadBalancerIP for this release. +Note, returns 127.0.0.1 if using ClusterIP. +*/}} +{{- define "airflow.serviceIP" -}} +{{- if eq .Values.service.type "ClusterIP" -}} +127.0.0.1 +{{- else -}} +{{- .Values.service.loadBalancerIP | default "127.0.0.1" -}} +{{- end -}} +{{- end -}} + +{{/* +Gets the host to be used for this application. +If not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty. +*/}} +{{- define "airflow.baseUrl" -}} +{{- $host := default (include "airflow.serviceIP" .) 
.Values.web.baseUrl -}} +{{- $port := printf ":%d" (int .Values.service.ports.http) -}} +{{- $schema := ternary "https://" "http://" (or .Values.web.tls.enabled (and .Values.ingress.enabled .Values.ingress.tls)) -}} +{{- if and (regexMatch "^https?://" .Values.web.baseUrl) (not .Values.ingress.enabled) -}} + {{- $schema = "" -}} +{{- end -}} +{{- if or (regexMatch ":\\d+$" .Values.web.baseUrl) (eq $port ":80") (eq $port ":443") -}} + {{- $port = "" -}} +{{- end -}} +{{- if and .Values.ingress.enabled .Values.ingress.hostname -}} + {{- $host = .Values.ingress.hostname -}} + {{- $port = "" -}} +{{- end -}} +{{- printf "%s%s%s" $schema $host $port -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "airflow.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "airflow.validateValues.dags.repositories" .) -}} +{{- $messages := append $messages (include "airflow.validateValues.dags.repository_details" .) -}} +{{- $messages := append $messages (include "airflow.validateValues.plugins.repositories" .) -}} +{{- $messages := append $messages (include "airflow.validateValues.plugins.repository_details" .) -}} +{{- $messages := append $messages (include "airflow.validateValues.triggerer.replicaCount" .) -}} +{{- $messages := append $messages (include "airflow.validateValues.metrics" . ) -}} +{{- $messages := append $messages (include "airflow.validateValues.executors" . 
) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Airflow - At least one repository details must be provided when "dags.enabled" is "true" +*/}} +{{- define "airflow.validateValues.dags.repositories" -}} +{{- if and .Values.dags.enabled (empty .Values.dags.repositories) (empty .Values.dags.existingConfigmap) -}} +airflow: dags.repositories + At least one repository must be provided when enabling downloading DAG files + from git repositories (--set dags.repositories[0].repository="xxx" + --set dags.repositories[0].name="xxx" + --set dags.repositories[0].branch="name") +{{- end -}} +{{- end -}} + +{{/* +Validate values of Airflow - "dags.repositories.repository", "dags.repositories.name", "dags.repositories.branch" must be provided when "dags.enabled" is "true" +*/}} +{{- define "airflow.validateValues.dags.repository_details" -}} +{{- if .Values.dags.enabled -}} +{{- range $index, $repository_detail := .Values.dags.repositories }} +{{- if empty $repository_detail.repository -}} +airflow: dags.repositories[$index].repository + The repository must be provided when enabling downloading DAG files + from git repository (--set dags.repositories[$index].repository="xxx") +{{- end -}} +{{- if empty $repository_detail.branch -}} +airflow: dags.repositories[$index].branch + The branch must be provided when enabling downloading DAG files + from git repository (--set dags.repositories[$index].branch="xxx") +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Airflow - "plugins.repositories" must be provided when "plugins.enabled" is "true" +*/}} +{{- define "airflow.validateValues.plugins.repositories" -}} +{{- if and .Values.plugins.enabled (empty .Values.plugins.repositories) -}} +airflow: plugins.repositories + At least one repository must be provided when enabling 
downloading plugins + from git repositories (--set plugins.repositories[0].repository="xxx" + --set plugins.repositories[0].name="xxx" + --set plugins.repositories[0].branch="name") +{{- end -}} +{{- end -}} + +{{/* +Validate values of Airflow - "plugins.repositories.repository", "plugins.repositories.name", "plugins.repositories.branch" must be provided when "plugins.enabled" is "true" +*/}} +{{- define "airflow.validateValues.plugins.repository_details" -}} +{{- if .Values.plugins.enabled -}} +{{- range $index, $repository_detail := .Values.plugins.repositories }} +{{- if empty $repository_detail.repository -}} +airflow: plugins.repositories[$index].repository + The repository must be provided when enabling downloading DAG files + from git repository (--set plugins.repositories[$index].repository="xxx") +{{- end -}} +{{- if empty $repository_detail.branch -}} +airflow: plugins.repositories[$index].branch + The branch must be provided when enabling downloading DAG files + from git repository (--set plugins.repositories[$index].branch="xxx") +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Airflow - number of Triggerer replicas +*/}} +{{- define "airflow.validateValues.triggerer.replicaCount" -}} +{{- $replicaCount := int .Values.triggerer.replicaCount }} +{{- if and .Values.triggerer.enabled .Values.triggerer.persistence.enabled .Values.triggerer.persistence.existingClaim (or (gt $replicaCount 1) .Values.triggerer.autoscaling.hpa.enabled) -}} +airflow: triggerer.replicaCount + A single existing PVC cannot be shared between multiple replicas. + Please set a valid number of replicas (--set triggerer.replicaCount=1), + disable HPA (--set triggerer.autoscaling.hpa.enabled=false), disable persistence + (--set triggerer.persistence.enabled=false) or rely on dynamic provisioning via Persistent + Volume Claims (--set triggerer.persistence.existingClaim=""). 
+{{- end -}} +{{- end -}} + +{{/* +Validate values of Airflow - metrics +*/}} +{{- define "airflow.validateValues.metrics" -}} +{{- if and .Values.metrics.enabled (include "airflow.database.useSqlConnection" .) }} +airflow: metrics + The metrics feature is currently not supported when using an SQL connection string to connect to the database. +{{- end -}} +{{- end -}} + +{{/* +In Airflow version 2.1.0, the CeleryKubernetesExecutor requires setting workers with CeleryExecutor in order to work properly. +This is a workaround and is subject to Airflow official resolution. +Ref: https://github.com/bitnami/charts/pull/6096#issuecomment-856499047 +*/}} +{{- define "airflow.worker.executor" -}} +{{- print (ternary "CeleryExecutor" .Values.executor (eq .Values.executor "CeleryKubernetesExecutor")) -}} +{{- end -}} + +{{/* +Validates a semver constraint +*/}} +{{- define "airflow.semverCondition" -}} +{{- $constraint := .constraint -}} +{{- $imageVersion := (.imageVersion | toString) -}} + +{{/* tag 'latest' is an special case, where we fall to .Chart.AppVersion value */}} +{{- if eq "latest" $imageVersion -}} +{{- $imageVersion = .context.Chart.AppVersion -}} +{{- else -}} +{{- $imageVersion = (index (splitList "-" $imageVersion) 0 ) -}} +{{- end -}} + +{{- if semverCompare $constraint $imageVersion -}} +true +{{- end -}} +{{- end -}} + +{{/* +Validates the image tag version is equal or higher than 3.0.0 +*/}} +{{- define "airflow.isImageMajorVersion3" -}} +{{- include "airflow.semverCondition" (dict "constraint" "^3" "imageVersion" .Values.image.tag "context" $) -}} +{{- end -}} + +{{/* +Validates the image tag version is equal or higher than 2.0.0 +*/}} +{{- define "airflow.isImageMajorVersion2" -}} +{{- include "airflow.semverCondition" (dict "constraint" "^2" "imageVersion" .Values.image.tag "context" $) -}} +{{- end -}} + +{{/* +Checks whether the scheduler object has to be an statefulset or a deployment depending on the configured executors +*/}} +{{- define 
"airflow.scheduler.requiresStatefulset" -}} +{{- $configuredExecutors := ternary (splitList "," .Values.executor) (list .Values.executor) (contains "," .Values.executor) -}} +{{- $statefulsetExecutors := list "SequentialExecutor" "LocalExecutor" "LocalCeleryExecutor" "LocalKubernetesExecutor" -}} +{{- $statefulset := false -}} +{{- range $executor := $configuredExecutors -}} + {{- if (has $executor $statefulsetExecutors) -}} + {{- $statefulset = true -}} + {{- end -}} +{{- end -}} +{{- if $statefulset -}} +true +{{- end -}} +{{- end -}} + +{{/* +Validates deprecated executors on Airflow 3 are not used +https://airflow.apache.org/docs/apache-airflow/stable/installation/upgrading_to_airflow3.html#breaking-changes +*/}} +{{- define "airflow.validateValues.executors" -}} +{{- $configuredExecutors := ternary (splitList "," .Values.executor) (list .Values.executor) (contains "," .Values.executor) -}} +{{- $deprecatedExecutors := list "SequentialExecutor" "CeleryKubernetesExecutor" "LocalKubernetesExecutor" -}} +{{- $executorsError := list -}} +{{- if (include "airflow.isImageMajorVersion3" .) }} + {{- range $executor := $configuredExecutors -}} + {{- if (has $executor $deprecatedExecutors) -}} + {{- $executorsError = append $executorsError $executor -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{/* configuredExecutors can't be empty */}} +{{- if (has "" $configuredExecutors) }} +airflow: executors + You need to provide at least one value for the '.executor' parameter. +{{- end -}} +{{- if not (empty $executorsError) -}} +airflow: executors + The next executors have been deprecated starting with Airflow 3.0.0 and can't be used: + {{- range $executorsError }} + - {{.}} + {{- end }} + See https://airflow.apache.org/docs/apache-airflow/stable/installation/upgrading_to_airflow3.html#breaking-changes for further details. 
+{{- end -}} +{{- end -}} diff --git a/addons/airflow/3/chart/airflow-3/templates/_init_containers_sidecars.tpl b/addons/airflow/3/chart/airflow-3/templates/_init_containers_sidecars.tpl new file mode 100644 index 00000000..a784ec17 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/_init_containers_sidecars.tpl @@ -0,0 +1,626 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Returns an init-container that prepares the Airflow configuration files for main containers to use them +*/}} +{{- define "airflow.defaultInitContainers.prepareConfig" -}} +- name: prepare-config + image: {{ include "airflow.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.defaultInitContainers.prepareConfig.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defaultInitContainers.prepareConfig.containerSecurityContext "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.defaultInitContainers.prepareConfig.resources }} + resources: {{- toYaml .Values.defaultInitContainers.prepareConfig.resources | nindent 4 }} + {{- else if ne .Values.defaultInitContainers.prepareConfig.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defaultInitContainers.prepareConfig.resourcesPreset) | nindent 4 }} + {{- end }} + command: + - init-stack + - /bin/bash + args: + - -ec + - | + . /opt/drycc/scripts/libairflow.sh + + mkdir -p /emptydir/app-base-dir /emptydir/app-conf-dir + + # Copy the configuration files to the writable directory + cp /opt/drycc/airflow/airflow.cfg /emptydir/app-base-dir/airflow.cfg + + # Apply changes affecting credentials + export AIRFLOW_CONF_FILE="/emptydir/app-base-dir/airflow.cfg" + {{- if (include "airflow.database.useSqlConnection" .) 
}} + {{- if and .Values.usePasswordFiles }} + export AIRFLOW_DATABASE_SQL_CONN="$(< $AIRFLOW_DATABASE_SQL_CONN_FILE)" + {{- end }} + airflow_conf_set "database" "sql_alchemy_conn" "$AIRFLOW_DATABASE_SQL_CONN" + {{- else }} + {{- if and .Values.usePasswordFiles }} + export AIRFLOW_DATABASE_PASSWORD="$(< $AIRFLOW_DATABASE_PASSWORD_FILE)" + {{- end }} + db_user="$(airflow_encode_url "$AIRFLOW_DATABASE_USERNAME")" + db_password="$(airflow_encode_url "$AIRFLOW_DATABASE_PASSWORD")" + airflow_conf_set "database" "sql_alchemy_conn" "postgresql+psycopg2://${db_user}:${db_password}@${AIRFLOW_DATABASE_HOST}:${AIRFLOW_DATABASE_PORT_NUMBER}/${AIRFLOW_DATABASE_NAME}" + {{- end }} + {{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} + {{- if and .Values.usePasswordFiles }} + export REDIS_PASSWORD="$(< $REDIS_PASSWORD_FILE)" + {{- end }} + redis_credentials=":$(airflow_encode_url "$REDIS_PASSWORD")" + [[ -n "$REDIS_USER" ]] && redis_credentials="$(airflow_encode_url "$REDIS_USER")$redis_credentials" + {{- if (include "airflow.database.useSqlConnection" .) 
}} + airflow_conf_set "celery" "result_backend" "db+${AIRFLOW_DATABASE_SQL_CONN}" + {{- else }} + airflow_conf_set "celery" "result_backend" "db+postgresql://${db_user}:${db_password}@${AIRFLOW_DATABASE_HOST}:${AIRFLOW_DATABASE_PORT_NUMBER}/${AIRFLOW_DATABASE_NAME}" + {{- end }} + {{- if .Values.celeryBrokerUrl }} + airflow_conf_set "celery" "broker_url" "${AIRFLOW_CELERY_BROKER_URL}" + {{- end }} + {{- if .Values.celeryBrokerTransportOption }} + readarray -t keys < <(echo "$AIRFLOW_CELERY_BROKER_TRANSPORT_OPTIONS" | jq -r --compact-output 'keys[]') + for key in "${keys[@]}"; do + airflow_conf_set "celery_broker_transport_options" "$key" "$(echo $AIRFLOW_CELERY_BROKER_TRANSPORT_OPTIONS | jq -r --compact-output ".$key")" + done + {{- end }} + {{- end }} + # Configure authentication backend + airflow_conf_set "core" "auth_manager" "airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager" + info "Airflow configuration ready" + + if [[ -f "/opt/drycc/airflow/config/airflow_local_settings.py" ]]; then + cp /opt/drycc/airflow/config/airflow_local_settings.py /emptydir/app-conf-dir/airflow_local_settings.py + else + touch /emptydir/app-conf-dir/airflow_local_settings.py + fi + + # HACK: When testing the db connection it creates an empty airflow.db file at the + # application root + touch /emptydir/app-base-dir/airflow.db + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if (include "airflow.database.useSqlConnection" .) }} + {{- if .Values.usePasswordFiles }} + - name: AIRFLOW_DATABASE_SQL_CONN_FILE + value: {{ printf "/opt/drycc/airflow/secrets/%s" (include "airflow.database.secretKey" .) }} + {{- else }} + - name: AIRFLOW_DATABASE_SQL_CONN + valueFrom: + secretKeyRef: + name: {{ include "airflow.database.secretName" . }} + key: {{ include "airflow.database.secretKey" . 
}} + {{- end }} + {{- else }} + - name: AIRFLOW_DATABASE_NAME + value: {{ include "airflow.database.name" . }} + - name: AIRFLOW_DATABASE_USERNAME + value: {{ include "airflow.database.user" . }} + {{- if .Values.usePasswordFiles }} + - name: AIRFLOW_DATABASE_PASSWORD_FILE + value: {{ printf "/opt/drycc/airflow/secrets/%s" (include "airflow.database.secretKey" .) }} + {{- else }} + - name: AIRFLOW_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "airflow.database.secretName" . }} + key: {{ include "airflow.database.secretKey" . }} + {{- end }} + - name: AIRFLOW_DATABASE_HOST + value: {{ include "airflow.database.host" . }} + - name: AIRFLOW_DATABASE_PORT_NUMBER + value: {{ include "airflow.database.port" . }} + {{- end }} + {{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} + {{- include "airflow.configure.redis" . | nindent 4 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 4 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /emptydir + - name: configuration + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: airflow.cfg + - name: configuration + mountPath: /opt/drycc/airflow/config/airflow_local_settings.py + subPath: airflow_local_settings.py + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} +{{- end -}} + +{{/* +Returns an init-container that prepares the Airflow Webserver configuration files for main containers to use them +*/}} +{{- define "airflow.defaultInitContainers.prepareWebConfig" -}} +- name: prepare-web-config + image: {{ include "airflow.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.defaultInitContainers.prepareConfig.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defaultInitContainers.prepareConfig.containerSecurityContext "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.defaultInitContainers.prepareConfig.resources }} + resources: {{- toYaml .Values.defaultInitContainers.prepareConfig.resources | nindent 4 }} + {{- else if ne .Values.defaultInitContainers.prepareConfig.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defaultInitContainers.prepareConfig.resourcesPreset) | nindent 4 }} + {{- end }} + command: + - init-stack + - /bin/bash + args: + - -ec + - | + . /opt/drycc/scripts/libairflow.sh + + # Copy the configuration files to the writable directory + cp /opt/drycc/airflow/webserver_config.py /emptydir/app-base-dir/webserver_config.py + {{- if .Values.ldap.enabled }} + {{- if .Values.usePasswordFiles }} + export AIRFLOW_LDAP_BIND_PASSWORD="$(< $AIRFLOW_LDAP_BIND_PASSWORD_FILE)" + {{- end }} + export AIRFLOW_WEBSERVER_CONF_FILE="/emptydir/app-base-dir/webserver_config.py" + airflow_webserver_conf_set "AUTH_LDAP_BIND_PASSWORD" "$AIRFLOW_LDAP_BIND_PASSWORD" "yes" + {{- end }} + info "Airflow webserver configuration ready" + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.ldap.enabled }} + {{- if .Values.usePasswordFiles }} + - name: AIRFLOW_LDAP_BIND_PASSWORD_FILE + value: "/opt/drycc/airflow/secrets/bind-password" + {{- else }} + - name: AIRFLOW_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "airflow.ldap.secretName" . 
}} + key: bind-password + {{- end }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 4 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /emptydir + - name: webserver-configuration + mountPath: /opt/drycc/airflow/webserver_config.py + subPath: webserver_config.py + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} +{{- end -}} + +{{/* +Returns an init-container that waits for db migrations to be ready +*/}} +{{- define "airflow.defaultInitContainers.waitForDBMigrations" -}} +- name: wait-for-db-migrations + image: {{ include "airflow.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.defaultInitContainers.waitForDBMigrations.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defaultInitContainers.waitForDBMigrations.containerSecurityContext "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.defaultInitContainers.waitForDBMigrations.resources }} + resources: {{- toYaml .Values.defaultInitContainers.waitForDBMigrations.resources | nindent 4 }} + {{- else if ne .Values.defaultInitContainers.waitForDBMigrations.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defaultInitContainers.waitForDBMigrations.resourcesPreset) | nindent 4 }} + {{- end }} + command: + - init-stack + - /bin/bash + args: + - -ec + - | + . /opt/drycc/scripts/airflow-env.sh + . /opt/drycc/scripts/libairflow.sh + + info "Trying to connect to the database server" + airflow_wait_for_db_connection + info "Waiting for db migrations to be completed" + airflow_wait_for_db_migrations + {{- if (include "airflow.isImageMajorVersion3" .) 
}} + info "Waiting for the admin user to exist" + airflow_wait_for_admin_user + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 4 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: app-base-dir/airflow.cfg + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 4 }} + {{- end }} +{{- end -}} + +{{/* +Returns the name that will identify the repository internally and it will be used to +create folders or volume names +*/}} +{{- define "airflow.dagsPlugins.repository.name" -}} + {{- $defaultName := regexFind "/.*$" .repository | replace "//" "" | replace "/" "-" | replace "." "-" -}} + {{- .name | default $defaultName | kebabcase -}} +{{- end -}} + +{{/* +Returns shared structure between load-dags and load-plugins init containers +*/}} +{{- define "airflow.defaultInitContainers.shared" -}} +- image: {{ include "airflow.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.defaultInitContainers.loadDAGsPlugins.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defaultInitContainers.loadDAGsPlugins.containerSecurityContext "context" .) 
| nindent 4 }} + {{- end }} + {{- if .Values.defaultInitContainers.loadDAGsPlugins.resources }} + resources: {{- toYaml .Values.defaultInitContainers.loadDAGsPlugins.resources | nindent 4 }} + {{- else if ne .Values.defaultInitContainers.loadDAGsPlugins.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defaultInitContainers.loadDAGsPlugins.resourcesPreset) | nindent 4 }} + {{- end }} + {{- if .Values.defaultInitContainers.loadDAGsPlugins.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.defaultInitContainers.loadDAGsPlugins.command "context" .) | nindent 4 }} + {{- else }} + command: ["init-stack", "/bin/bash"] + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVars "context" .) 
| nindent 4 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 4 }} + {{- end }} + {{- if or .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVarsCM .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVarsSecret }} + envFrom: + {{- if .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVarsCM }} + {{- end }} + {{- if .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.defaultInitContainers.loadDAGsPlugins.extraEnvVarsSecret }} + {{- end }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /etc/ssh + subPath: etc-ssh-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/.ssh + subPath: ssh-dir + {{- if .Values.defaultInitContainers.loadDAGsPlugins.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.defaultInitContainers.loadDAGsPlugins.extraVolumeMounts "context" $) | nindent 4 }} + {{- end }} +{{- end -}} + +{{/* +Returns an init-container that loads DAGs from a ConfigMap or Git repositories +*/}} +{{- define "airflow.defaultInitContainers.loadDAGs" -}} +{{ include "airflow.defaultInitContainers.shared" . 
}} + {{- if not (empty .Values.dags.existingConfigmap) }} + - name: external-dags + mountPath: /configmap + {{- end }} + {{- if or (not (empty .Values.dags.existingConfigmap)) (not (empty .Values.dags.repositories)) }} + - name: empty-dir + mountPath: /dags + subPath: app-dags-dir + {{- end }} + {{- if or .Values.dags.sshKey .Values.dags.existingSshKeySecret }} + - name: dags-ssh-key + mountPath: /opt/drycc/airflow/.ssh/dags-ssh-key + subPath: dags-ssh-key + {{- end }} + {{- if .Values.defaultInitContainers.loadDAGsPlugins.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.defaultInitContainers.loadDAGsPlugins.args "context" .) | nindent 4 }} + {{- else }} + args: + - -ec + - | + . /opt/drycc/scripts/libfs.sh + . /opt/drycc/scripts/libos.sh + + if ! am_i_root && [[ -e "$LIBNSS_WRAPPER_PATH" ]]; then + echo "airflow:x:$(id -u):$(id -g):Airflow:$AIRFLOW_HOME:/bin/false" > "$NSS_WRAPPER_PASSWD" + echo "airflow:x:$(id -g):" > "$NSS_WRAPPER_GROUP" + + export LD_PRELOAD="$LIBNSS_WRAPPER_PATH" + export HOME="$AIRFLOW_HOME" + fi + + {{- if or .Values.dags.sshKey .Values.dags.existingSshKeySecret }} + export GIT_SSH_COMMAND="ssh -i /opt/drycc/airflow/.ssh/dags-ssh-key -o StrictHostKeyChecking=no" + {{- end }} + {{- range .Values.dags.repositories }} + is_dir_empty "/dags/{{ include "airflow.dagsPlugins.repository.name" . }}" && git clone {{ .repository }} --depth 1 --branch {{ .branch }} /dags/{{ include "airflow.dagsPlugins.repository.name" . }} + {{- end }} + {{- if not (empty .Values.dags.existingConfigmap) }} + mkdir -p /dags/external + cp -v /configmap/* /dags/external + {{- end }} + {{- end }} + name: load-dags +{{- end -}} + +{{/* +Returns an init-container that loads plugins from Git repositories +*/}} +{{- define "airflow.defaultInitContainers.loadPlugins" -}} +{{ include "airflow.defaultInitContainers.shared" . 
}} + - name: empty-dir + mountPath: /plugins + subPath: app-plugins-dir + {{- if or .Values.plugins.sshKey .Values.plugins.existingSshKeySecret }} + - name: plugins-ssh-key + mountPath: /opt/drycc/airflow/.ssh/plugins-ssh-key + subPath: plugins-ssh-key + {{- end }} + {{- if .Values.defaultInitContainers.loadDAGsPlugins.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.defaultInitContainers.loadDAGsPlugins.args "context" .) | nindent 4 }} + {{- else }} + args: + - -ec + - | + . /opt/drycc/scripts/libfs.sh + . /opt/drycc/scripts/libos.sh + + if ! am_i_root && [[ -e "$LIBNSS_WRAPPER_PATH" ]]; then + echo "airflow:x:$(id -u):$(id -g):Airflow:$AIRFLOW_HOME:/bin/false" > "$NSS_WRAPPER_PASSWD" + echo "airflow:x:$(id -g):" > "$NSS_WRAPPER_GROUP" + + export LD_PRELOAD="$LIBNSS_WRAPPER_PATH" + export HOME="$AIRFLOW_HOME" + fi + + {{- if or .Values.plugins.sshKey .Values.plugins.existingSshKeySecret }} + export GIT_SSH_COMMAND="ssh -i /opt/drycc/airflow/.ssh/plugins-ssh-key -o StrictHostKeyChecking=no" + {{- end }} + {{- range .Values.plugins.repositories }} + is_dir_empty "/plugins/{{ include "airflow.dagsPlugins.repository.name" . }}" && git clone {{ .repository }} --depth 1 --branch {{ .branch }} /plugins/{{ include "airflow.dagsPlugins.repository.name" . }} + {{- end }} + {{- end }} + name: load-plugins +{{- end -}} + +{{/* +Returns shared structure between sync-dags and sync-plugins sidecars +*/}} +{{- define "airflow.defaultSidecars.shared" -}} +- image: {{ include "airflow.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.defaultSidecars.syncDAGsPlugins.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defaultSidecars.syncDAGsPlugins.containerSecurityContext "context" .) 
| nindent 4 }} + {{- end }} + {{- if .Values.defaultSidecars.syncDAGsPlugins.resources }} + resources: {{- toYaml .Values.defaultSidecars.syncDAGsPlugins.resources | nindent 4 }} + {{- else if ne .Values.defaultSidecars.syncDAGsPlugins.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defaultSidecars.syncDAGsPlugins.resourcesPreset) | nindent 4 }} + {{- end }} + {{- if .Values.defaultSidecars.syncDAGsPlugins.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.defaultSidecars.syncDAGsPlugins.command "context" .) | nindent 4 }} + {{- else }} + command: ["init-stack", "/bin/bash"] + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.defaultSidecars.syncDAGsPlugins.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.defaultSidecars.syncDAGsPlugins.extraEnvVars "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 4 }} + {{- end }} + {{- if or .Values.defaultSidecars.syncDAGsPlugins.extraEnvVarsCM .Values.defaultSidecars.syncDAGsPlugins.extraEnvVarsSecret }} + envFrom: + {{- if .Values.defaultSidecars.syncDAGsPlugins.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.defaultSidecars.syncDAGsPlugins.extraEnvVarsCM }} + {{- end }} + {{- if .Values.defaultSidecars.syncDAGsPlugins.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.defaultSidecars.syncDAGsPlugins.extraEnvVarsSecret }} + {{- end }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /etc/ssh + subPath: etc-ssh-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/.ssh + subPath: ssh-dir + {{- if 
.Values.defaultSidecars.syncDAGsPlugins.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.defaultSidecars.syncDAGsPlugins.extraVolumeMounts "context" $) | nindent 4 }} + {{- end }} +{{- end -}} + +{{/* +Returns a sidecar that syncs DAGs from Git repositories +*/}} +{{- define "airflow.defaultSidecars.syncDAGs" -}} +{{ include "airflow.defaultSidecars.shared" . }} + - name: empty-dir + mountPath: /dags + subPath: app-dags-dir + {{- if or .Values.dags.sshKey .Values.dags.existingSshKeySecret }} + - name: dags-ssh-key + mountPath: /opt/drycc/airflow/.ssh/dags-ssh-key + subPath: dags-ssh-key + {{- end }} + {{- if .Values.defaultSidecars.syncDAGsPlugins.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.defaultSidecars.syncDAGsPlugins.args "context" .) | nindent 4 }} + {{- else }} + args: + - -ec + - | + . /opt/drycc/scripts/libos.sh + + if ! am_i_root && [[ -e "$LIBNSS_WRAPPER_PATH" ]]; then + echo "airflow:x:$(id -u):$(id -g):Airflow:$AIRFLOW_HOME:/bin/false" > "$NSS_WRAPPER_PASSWD" + echo "airflow:x:$(id -g):" > "$NSS_WRAPPER_GROUP" + + export LD_PRELOAD="$LIBNSS_WRAPPER_PATH" + export HOME="$AIRFLOW_HOME" + fi + + while true; do + {{- if or .Values.dags.sshKey .Values.dags.existingSshKeySecret }} + export GIT_SSH_COMMAND="ssh -i /opt/drycc/airflow/.ssh/dags-ssh-key -o StrictHostKeyChecking=no" + {{- end }} + {{- range .Values.dags.repositories }} + cd /dags/{{ include "airflow.dagsPlugins.repository.name" . }} && git pull origin {{ .branch }} || true + {{- end }} + sleep {{ default "60" .Values.defaultSidecars.syncDAGsPlugins.interval }} + done + {{- end }} + name: sync-dags +{{- end -}} + +{{/* +Returns a sidecar that syncs plugins from Git repositories +*/}} +{{- define "airflow.defaultSidecars.syncPlugins" -}} +{{ include "airflow.defaultSidecars.shared" . 
}} + - name: empty-dir + mountPath: /plugins + subPath: app-plugins-dir + {{- if or .Values.plugins.sshKey .Values.plugins.existingSshKeySecret }} + - name: plugins-ssh-key + mountPath: /opt/drycc/airflow/.ssh/plugins-ssh-key + subPath: plugins-ssh-key + {{- end }} + {{- if .Values.defaultSidecars.syncDAGsPlugins.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.defaultSidecars.syncDAGsPlugins.args "context" .) | nindent 4 }} + {{- else }} + args: + - -ec + - | + . /opt/drycc/scripts/libos.sh + + if ! am_i_root && [[ -e "$LIBNSS_WRAPPER_PATH" ]]; then + echo "airflow:x:$(id -u):$(id -g):Airflow:$AIRFLOW_HOME:/bin/false" > "$NSS_WRAPPER_PASSWD" + echo "airflow:x:$(id -g):" > "$NSS_WRAPPER_GROUP" + + export LD_PRELOAD="$LIBNSS_WRAPPER_PATH" + export HOME="$AIRFLOW_HOME" + fi + {{- if or .Values.plugins.sshKey .Values.plugins.existingSshKeySecret }} + export GIT_SSH_COMMAND="ssh -i /opt/drycc/airflow/.ssh/plugins-ssh-key -o StrictHostKeyChecking=no" + {{- end }} + while true; do + {{- range .Values.plugins.repositories }} + cd /plugins/{{ include "airflow.dagsPlugins.repository.name" . }} && git pull origin {{ .branch }} || true + {{- end }} + sleep {{ default "60" .Values.defaultSidecars.syncDAGsPlugins.interval }} + done + {{- end }} + name: sync-plugins +{{- end -}} + +{{/* +Returns the volume mounts to use on Airflow containers to mount custom DAGs +*/}} +{{- define "airflow.dags.volumeMounts" -}} +{{- if not (empty .Values.dags.existingConfigmap) }} +- name: empty-dir + mountPath: /opt/drycc/airflow/dags/external + subPath: app-dags-dir/external +{{- end }} +{{- range .Values.dags.repositories }} +- name: empty-dir + mountPath: /opt/drycc/airflow/dags/git_{{ include "airflow.dagsPlugins.repository.name" . }} + {{- if .path }} + subPath: app-dags-dir/{{ include "airflow.dagsPlugins.repository.name" . }}/{{ .path }} + {{- else }} + subPath: app-dags-dir/{{ include "airflow.dagsPlugins.repository.name" . 
}} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the extra volumes to add on Airflow pods to load custom DAGS +*/}} +{{- define "airflow.dags.volumes" -}} +{{- if .Values.dags.existingConfigmap }} +- name: external-dags + configMap: + name: {{ tpl .Values.dags.existingConfigmap $ }} +{{- end }} +{{- if or .Values.dags.sshKey .Values.dags.existingSshKeySecret }} +- name: dags-ssh-key + secret: + secretName: {{ include "airflow.dags.ssh.secretName" . }} + items: + - key: {{ default "dags-ssh-key" (tpl .Values.dags.existingSshKeySecretKey .) }} + path: dags-ssh-key + mode: 0600 +{{- end }} +{{- end -}} + +{{/* +Returns the volume mounts to use on Airflow containers to mount custom plugins +*/}} +{{- define "airflow.plugins.volumeMounts" -}} +{{- range .Values.plugins.repositories }} +- name: empty-dir + mountPath: /opt/drycc/airflow/plugins/git_{{ include "airflow.dagsPlugins.repository.name" . }} + {{- if .path }} + subPath: app-plugins-dir/{{ include "airflow.dagsPlugins.repository.name" . }}/{{ .path }} + {{- else }} + subPath: app-plugins-dir/{{ include "airflow.dagsPlugins.repository.name" . }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the extra volumes to add on Airflow pods to load custom plugins +*/}} +{{- define "airflow.plugins.volumes" -}} +{{- if or .Values.plugins.sshKey .Values.plugins.existingSshKeySecret }} +- name: plugins-ssh-key + secret: + secretName: {{ include "airflow.plugins.ssh.secretName" . }} + items: + - key: {{ default "plugins-ssh-key" (tpl .Values.plugins.existingSshKeySecretKey .) }} + path: plugins-ssh-key + mode: 0600 +{{- end }} +{{- end -}} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/configmap-pip.yaml b/addons/airflow/3/chart/airflow-3/templates/config/configmap-pip.yaml new file mode 100644 index 00000000..88cc65c5 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/configmap-pip.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or .Values.requirements .Values.pip }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-req + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +data: + {{- if .Values.requirements }} + requirements.txt: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.requirements "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.pip }} + pip.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.pip "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/configmap-pod-template.yaml b/addons/airflow/3/chart/airflow-3/templates/config/configmap-pod-template.yaml new file mode 100644 index 00000000..3cf7c175 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/configmap-pod-template.yaml @@ -0,0 +1,270 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $kube := (contains "KubernetesExecutor" .Values.executor) -}} +{{- if $kube }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-pod-template" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +data: + pod_template.yaml: |- + {{- if .Values.worker.podTemplate }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.podTemplate "context" $) | nindent 4 }} + {{- else }} + apiVersion: v1 + kind: Pod + metadata: + name: k8s-executor-pod + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.podLabels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: worker + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/config/configmap.yaml") . | sha256sum }} + checksum/webserver-configmap: {{ include (print $.Template.BasePath "/web/configmap.yaml") . | sha256sum }} + {{- if .Values.worker.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "airflow.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.worker.automountServiceAccountToken }} + {{- if .Values.worker.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.worker.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.worker.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.worker.podAffinityPreset "component" "worker" "topologyKey" .Values.worker.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.worker.podAntiAffinityPreset "component" "worker" "topologyKey" .Values.worker.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.worker.nodeAffinityPreset.type "key" .Values.worker.nodeAffinityPreset.key "values" .Values.worker.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.worker.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.worker.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.worker.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.worker.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.worker.priorityClassName }} + priorityClassName: {{ .Values.worker.priorityClassName | quote }} + {{- end }} + restartPolicy: Never + {{- if .Values.worker.schedulerName }} + schedulerName: {{ .Values.worker.schedulerName }} + {{- end }} + serviceAccountName: {{ include "airflow.serviceAccountName" . 
}} + {{- if .Values.worker.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.worker.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- include "airflow.defaultInitContainers.prepareConfig" . | nindent 8 }} + {{- include "airflow.defaultInitContainers.prepareWebConfig" . | nindent 8 }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultInitContainers.loadDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultInitContainers.loadPlugins" . | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.worker.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: airflow-worker + image: {{ include "airflow.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.worker.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.worker.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.worker.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.worker.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.worker.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.worker.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- include "airflow.configure.airflow.common" . | nindent 12 }} + - name: AIRFLOW_COMPONENT_TYPE + value: "worker" + - name: AIRFLOW__CORE__EXECUTOR + value: LocalExecutor + - name: AIRFLOW_EXECUTOR + value: {{ include "airflow.worker.executor" . }} + {{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} + {{- include "airflow.configure.redis" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.worker.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.worker.extraEnvVarsCM .Values.worker.extraEnvVarsSecret .Values.worker.extraEnvVarsSecrets .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.extraEnvVarsSecrets }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.worker.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.worker.extraEnvVarsCM }} + {{- end }} + {{- if .Values.worker.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.worker.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.extraEnvVarsSecrets }} + {{- range .Values.extraEnvVarsSecrets }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.worker.extraEnvVarsSecrets }} + {{- range .Values.worker.extraEnvVarsSecrets }} + - secretRef: + name: {{ . 
}} + {{- end }} + {{- end }} + {{- end }} + ports: + - name: worker + containerPort: {{ .Values.worker.containerPorts.http }} + {{- if .Values.worker.resources }} + resources: {{- toYaml .Values.worker.resources | nindent 12 }} + {{- else if ne .Values.worker.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.worker.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.worker.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.worker.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.worker.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.worker.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: worker + {{- end }} + {{- if .Values.worker.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.worker.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.worker.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.worker.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: worker + {{- end }} + {{- if .Values.worker.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.worker.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.worker.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.worker.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: worker + {{- end }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + - name: empty-dir + 
mountPath: /opt/drycc/airflow/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: app-base-dir/airflow.cfg + - name: empty-dir + mountPath: /opt/drycc/airflow/webserver_config.py + subPath: app-base-dir/webserver_config.py + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.web.tls.enabled }} + - name: tls-certificates + mountPath: /opt/drycc/airflow/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.worker.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.worker.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + configMap: + name: {{ include "airflow.configMapName" . }} + - name: webserver-configuration + configMap: + name: {{ include "airflow.web.configMapName" . }} + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + projected: + sources: + - secret: + name: {{ include "airflow.secretName" . }} + - secret: + name: {{ include "airflow.database.secretName" . 
}} + {{- if .Values.ldap.enabled }} + - secret: + name: {{ include "airflow.ldap.secretName" . }} + {{- end }} + {{- end }} + {{- if .Values.web.tls.enabled }} + - name: tls-certificates + secret: + secretName: {{ template "airflow.web.tls.secretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/configmap.yaml b/addons/airflow/3/chart/airflow-3/templates/config/configmap.yaml new file mode 100644 index 00000000..a967b6b1 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/configmap.yaml @@ -0,0 +1,89 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* +Return the Airflow common configuration. +ref: https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html +*/}} +{{- define "airflow.configuration" -}} +{{- if .Values.configuration }} +{{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" .) }} +{{- else }} +core: + load_examples: {{ ternary "True" "False" .Values.loadExamples | squote }} + executor: {{ .Values.executor | quote }} + {{- if (include "airflow.isImageMajorVersion3" .) }} + execution_api_server_url: {{ printf "http://%s:%s/execution/" (include "airflow.web.fullname" .) 
(.Values.service.ports.http | toString) | quote }} + {{- end }} +logging: + colored_console_log: 'False' +metrics: + statsd_on: {{ ternary "True" "False" (.Values.metrics.enabled) | squote }} + statsd_port: {{ .Values.metrics.service.ports.ingest | quote }} + statsd_prefix: airflow + statsd_host: {{ include "airflow.metrics.fullname" . | quote }} +scheduler: + standalone_dag_processor: {{ ternary "True" "False" .Values.dagProcessor.enabled | squote }} +triggerer: + {{- if (include "airflow.isImageMajorVersion2" .) }} + default_capacity: {{ .Values.triggerer.defaultCapacity | quote }} + {{- else }} + capacity: {{ .Values.triggerer.defaultCapacity | quote }} + {{- end }} +webserver: + base_url: {{ include "airflow.baseUrl" . | quote }} + enable_proxy_fix: {{ ternary "True" "False" (and .Values.ingress.enabled .Values.ingress.tls) | squote }} + {{- if .Values.web.tls.enabled }} + web_server_ssl_cert: "/opt/drycc/airflow/certs/tls.crt" + web_server_ssl_key: "/opt/drycc/airflow/certs/tls.key" + {{- end }} + {{- if (include "airflow.isImageMajorVersion2" .) }} + web_server_port: {{ .Values.web.containerPorts.http | quote }} + {{- end }} +{{- if (include "airflow.isImageMajorVersion3" .) }} +api: + base_url: {{ include "airflow.baseUrl" . | quote }} + port: {{ .Values.web.containerPorts.http | quote }} +{{- end }} +{{- if contains "KubernetesExecutor" .Values.executor }} +kubernetes_executor: + namespace: {{ include "common.names.namespace" . | quote }} + worker_container_repository: {{ printf "%s/%s" .Values.image.registry .Values.image.repository | quote }} + worker_container_tag: {{ .Values.image.tag | quote }} + delete_worker_pods: 'True' + delete_worker_pods_on_failure: 'True' + pod_template_file: "/opt/drycc/airflow/config/pod_template.yaml" +{{- end }} +{{- end }} +{{- end }} + +{{- if not .Values.existingConfigmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +data: + {{- $configuration := include "airflow.configuration" . | fromYaml -}} + {{- if .Values.overrideConfiguration }} + {{- $overrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.overrideConfiguration "context" .) | fromYaml }} + {{- $configuration = mustMergeOverwrite $configuration $overrideConfiguration }} + {{- end }} + airflow.cfg: |- + {{- range $section, $settings := $configuration }} + [{{ $section }}] + {{- range $key, $val := $settings }} + {{ $key }} = {{ $val }} + {{- end }} + {{- end }} + {{- if .Values.localSettings }} + airflow_local_settings.py: |- + {{- include "common.tplvalues.render" (dict "value" .Values.localSettings "context" .) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/secret-external-broker.yaml b/addons/airflow/3/chart/airflow-3/templates/config/secret-external-broker.yaml new file mode 100644 index 00000000..eadd9fd3 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/secret-external-broker.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (not (or (eq .Values.executor "KubernetesExecutor" ) (eq .Values.executor "LocalKubernetesExecutor" ))) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-%s" .Release.Name "celerybroker" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.celeryBrokerUrl }} + celery-broker-url: {{ .Values.celeryBrokerUrl | b64enc | quote }} + {{- end }} + {{- if .Values.celeryBrokerTransportOption }} + celery-broker-transport-option: {{ .Values.celeryBrokerTransportOption | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/secret-external-db.yaml b/addons/airflow/3/chart/airflow-3/templates/config/secret-external-db.yaml new file mode 100644 index 00000000..ceaf5b9d --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/secret-external-db.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (not .Values.postgresql.enabled) (not .Values.externalDatabase.existingSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-externaldb" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{ include "airflow.database.secretKey" . 
}}: {{ default .Values.externalDatabase.password .Values.externalDatabase.sqlConnection | b64enc | quote }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/secret-ldap.yaml b/addons/airflow/3/chart/airflow-3/templates/config/secret-ldap.yaml new file mode 100644 index 00000000..2c9e6f20 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/secret-ldap.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.ldap.enabled (not .Values.ldap.existingSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-ldap" (include "common.names.fullname" .) | quote }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + bind-password: {{ .Values.ldap.bindpw | b64enc | quote }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/secret-ssh.yaml b/addons/airflow/3/chart/airflow-3/templates/config/secret-ssh.yaml new file mode 100644 index 00000000..92507e5a --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/secret-ssh.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or (and .Values.dags.sshKey (not .Values.dags.existingSshKeySecret)) (and .Values.plugins.sshKey (not .Values.plugins.existingSshKeySecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-ssh" (include "common.names.fullname" .) | quote }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.dags.sshKey (not .Values.dags.existingSshKeySecret) }} + dags-ssh-key: {{ .Values.dags.sshKey | b64enc | quote }} + {{- end }} + {{- if and .Values.plugins.sshKey (not .Values.plugins.existingSshKeySecret) }} + plugins-ssh-key: {{ .Values.plugins.sshKey | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/config/secret.yaml b/addons/airflow/3/chart/airflow-3/templates/config/secret.yaml new file mode 100644 index 00000000..bdd27d91 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/config/secret.yaml @@ -0,0 +1,39 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.auth.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + airflow-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "airflow-password" "providedValues" (list "auth.password") "context" $) }} + # Airflow keys must be base64-encoded, hence we need to pipe to 'b64enc' twice + # The auto-generation mechanism available at "common.secrets.passwords.manage" isn't compatible with encoding twice + # Therefore, we can only use this function if the secret already exists + {{- if or (include "common.secrets.exists" (dict "secret" (include "common.names.fullname" .) "context" $)) (not (empty .Values.auth.fernetKey)) }} + airflow-fernet-key: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "airflow-fernet-key" "providedValues" (list "auth.fernetKey") "context" $) }} + {{- else }} + airflow-fernet-key: {{ randAlphaNum 32 | b64enc | b64enc | quote }} + {{- end }} + {{- if or (include "common.secrets.exists" (dict "secret" (include "common.names.fullname" .) "context" $)) (not (empty .Values.auth.secretKey)) }} + airflow-secret-key: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "airflow-secret-key" "providedValues" (list "auth.secretKey") "context" $) }} + {{- else }} + airflow-secret-key: {{ randAlphaNum 32 | b64enc | b64enc | quote }} + {{- end }} + {{- if (include "airflow.isImageMajorVersion3" .) }} + {{- if or (include "common.secrets.exists" (dict "secret" (include "common.names.fullname" .) "context" $)) (not (empty .Values.auth.jwtSecretKey)) }} + airflow-jwt-secret-key: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "airflow-jwt-secret-key" "providedValues" (list "auth.jwtSecretKey") "failOnNew" false "context" $) }} + {{- else }} + airflow-jwt-secret-key: {{ randAlphaNum 32 | b64enc | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/dag-processor/deployment.yaml b/addons/airflow/3/chart/airflow-3/templates/dag-processor/deployment.yaml new file mode 100644 index 00000000..4e74cdb9 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/dag-processor/deployment.yaml @@ -0,0 +1,272 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dagProcessor.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "airflow.dagProcessor.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dag-processor + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | trim | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.dagProcessor.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: dag-processor + {{- if not .Values.dagProcessor.autoscaling.hpa.enabled }} + replicas: {{ .Values.dagProcessor.replicaCount }} + {{- end }} + {{- if .Values.dagProcessor.updateStrategy }} + strategy: {{- toYaml .Values.dagProcessor.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: dag-processor + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/config/configmap.yaml") . | sha256sum }} + {{- if .Values.dagProcessor.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "airflow.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.dagProcessor.automountServiceAccountToken }} + {{- if .Values.dagProcessor.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dagProcessor.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dagProcessor.podAffinityPreset "component" "dag-processor" "customLabels" $podLabels "topologyKey" .Values.dagProcessor.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dagProcessor.podAntiAffinityPreset "component" "dag-processor" "customLabels" $podLabels "topologyKey" .Values.dagProcessor.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" 
.Values.dagProcessor.nodeAffinityPreset.type "key" .Values.dagProcessor.nodeAffinityPreset.key "values" .Values.dagProcessor.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.dagProcessor.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dagProcessor.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.dagProcessor.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.dagProcessor.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dagProcessor.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.dagProcessor.priorityClassName }} + priorityClassName: {{ .Values.dagProcessor.priorityClassName | quote }} + {{- end }} + {{- if .Values.dagProcessor.schedulerName }} + schedulerName: {{ .Values.dagProcessor.schedulerName }} + {{- end }} + serviceAccountName: {{ include "airflow.serviceAccountName" . }} + {{- if .Values.dagProcessor.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.dagProcessor.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- include "airflow.defaultInitContainers.prepareConfig" . | nindent 8 }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultInitContainers.loadDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultInitContainers.loadPlugins" . | nindent 8 }} + {{- end }} + {{- include "airflow.defaultInitContainers.waitForDBMigrations" . 
| nindent 8 }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.dagProcessor.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: airflow-dag-processor + image: {{ include "airflow.image" . | quote }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.dagProcessor.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.dagProcessor.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.dagProcessor.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.dagProcessor.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- include "airflow.configure.airflow.common" . | nindent 12 }} + - name: AIRFLOW_COMPONENT_TYPE + value: "dag-processor" + - name: AIRFLOW_EXECUTOR + value: {{ .Values.executor }} + {{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} + {{- include "airflow.configure.redis" . 
| nindent 12 }} + - name: PYTHONPYCACHEPREFIX + value: "/opt/drycc/airflow/venv/tmp" + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dagProcessor.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.dagProcessor.extraEnvVarsCM .Values.dagProcessor.extraEnvVarsSecret .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.dagProcessor.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.dagProcessor.extraEnvVarsCM }} + {{- end }} + {{- if .Values.dagProcessor.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.dagProcessor.extraEnvVarsSecret }} + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.dagProcessor.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.customLivenessProbe "context" $) | trim | nindent 12 }} + {{- else if .Values.dagProcessor.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dagProcessor.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - pgrep + - -f + - "airflow dag-processor" + {{- end }} + {{- if .Values.dagProcessor.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.customReadinessProbe "context" $) | trim | nindent 12 }} + {{- else if .Values.dagProcessor.readinessProbe.enabled }} + {{- $readinessTimeout := sub (int .Values.dagProcessor.readinessProbe.timeoutSeconds) 1 }} + readinessProbe: {{- include 
"common.tplvalues.render" (dict "value" (omit .Values.dagProcessor.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /bin/bash + - -ec + - | + export CONNECTION_CHECK_MAX_COUNT=0 + timeout {{ $readinessTimeout }} airflow jobs check --job-type DagProcessorJob --local --limit 0 {{- if not .Values.diagnosticMode.enabled }} 2>/dev/null {{- end }} + {{- end }} + {{- if .Values.dagProcessor.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.dagProcessor.startupProbe.enabled }} + {{- $startupTimeout := sub (int .Values.dagProcessor.startupProbe.timeoutSeconds) 1 }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dagProcessor.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /bin/bash + - -ec + - | + export CONNECTION_CHECK_MAX_COUNT=0 + timeout {{ $startupTimeout }} airflow jobs check --job-type DagProcessorJob --local --limit 0 {{- if not .Values.diagnosticMode.enabled }} 2>/dev/null {{- end }} + {{- end }} + {{- end }} + {{- if .Values.dagProcessor.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dagProcessor.resources }} + resources: {{- toYaml .Values.dagProcessor.resources | nindent 12 }} + {{- else if ne .Values.dagProcessor.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.dagProcessor.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/tmp + subPath: 
app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: app-base-dir/airflow.cfg + - name: empty-dir + mountPath: /opt/drycc/airflow/config/airflow_local_settings.py + subPath: app-conf-dir/airflow_local_settings.py + {{- if and .Values.dagProcessor.containerSecurityContext.enabled .Values.dagProcessor.containerSecurityContext.readOnlyRootFilesystem }} + - name: empty-dir + mountPath: /opt/drycc/airflow/venv/tmp + subPath: app-pyc-cache-dir + {{- end }} + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dagProcessor.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultSidecars.syncDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultSidecars.syncPlugins" . | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.dagProcessor.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + configMap: + name: {{ include "airflow.configMapName" . 
}} + optional: true + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + projected: + sources: + - secret: + name: {{ include "airflow.secretName" . }} + - secret: + name: {{ include "airflow.database.secretName" . }} + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dagProcessor.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.dagProcessor.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/dag-processor/hpa.yaml b/addons/airflow/3/chart/airflow-3/templates/dag-processor/hpa.yaml new file mode 100644 index 00000000..fdb963cb --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/dag-processor/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dagProcessor.enabled .Values.dagProcessor.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "airflow.dagProcessor.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dag-processor + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . 
}} + kind: Deployment + name: {{ template "airflow.dagProcessor.fullname" . }} + minReplicas: {{ .Values.dagProcessor.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.dagProcessor.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.dagProcessor.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.dagProcessor.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.dagProcessor.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.dagProcessor.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/dag-processor/networkpolicy.yaml b/addons/airflow/3/chart/airflow-3/templates/dag-processor/networkpolicy.yaml new file mode 100644 index 00000000..5c973d5d --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/dag-processor/networkpolicy.yaml @@ -0,0 +1,76 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dagProcessor.enabled .Values.dagProcessor.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "airflow.dagProcessor.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dag-processor + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.dagProcessor.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: dag-processor + policyTypes: + - Ingress + - Egress + {{- if .Values.dagProcessor.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.http }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.service.ports.ingest }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.dagProcessor.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dagProcessor.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.service.ports.http }} + - port: {{ .Values.worker.containerPorts.http }} + {{- if not .Values.dagProcessor.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-dag-processor" (include "common.names.fullname" .) 
}}-client: "true" + {{- if .Values.dagProcessor.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.dagProcessor.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.dagProcessor.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.dagProcessor.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.dagProcessor.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dagProcessor.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/dag-processor/poddisruptionbudget.yaml b/addons/airflow/3/chart/airflow-3/templates/dag-processor/poddisruptionbudget.yaml new file mode 100644 index 00000000..06b5ca1b --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/dag-processor/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dagProcessor.enabled .Values.dagProcessor.pdb.create }} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ template "airflow.dagProcessor.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dag-processor +spec: + {{- if .Values.dagProcessor.pdb.minAvailable }} + minAvailable: {{ .Values.dagProcessor.pdb.minAvailable }} + {{- end }} + {{- if or .Values.dagProcessor.pdb.maxUnavailable ( not .Values.dagProcessor.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.dagProcessor.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.dagProcessor.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: dag-processor +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/dag-processor/vpa.yaml b/addons/airflow/3/chart/airflow-3/templates/dag-processor/vpa.yaml new file mode 100644 index 00000000..9d7f0dd6 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/dag-processor/vpa.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dagProcessor.enabled (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.dagProcessor.autoscaling.vpa.enabled }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "airflow.dagProcessor.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dag-processor + {{- if or .Values.dagProcessor.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.dagProcessor.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: airflow-dag-processor + {{- with .Values.dagProcessor.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dagProcessor.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dagProcessor.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "airflow.dagProcessor.fullname" . }} + {{- if .Values.dagProcessor.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.dagProcessor.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/extra-list.yaml b/addons/airflow/3/chart/airflow-3/templates/extra-list.yaml new file mode 100644 index 00000000..9570df4a --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/metrics/configmap.yaml b/addons/airflow/3/chart/airflow-3/templates/metrics/configmap.yaml new file mode 100644 index 00000000..98406cb9 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/metrics/configmap.yaml @@ -0,0 +1,113 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.metrics.existingConfigmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "airflow.metrics.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +data: + mappings.yml: |- + {{- if .Values.metrics.configuration }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.configuration "context" .) 
| indent 4 }} + {{- else }} + mappings: + - match: airflow.dagrun.dependency-check.*.* + name: "airflow_dagrun_dependency_check" + labels: + dag_id: "$1" + + - match: airflow.operator_successes_(.*) + match_type: regex + name: "airflow_operator_successes" + labels: + operator: "$1" + + - match: airflow.operator_failures_(.*) + match_type: regex + name: "airflow_operator_failures" + labels: + operator: "$1" + + - match: airflow.scheduler_heartbeat + match_type: regex + name: "airflow_scheduler_heartbeat" + labels: + type: counter + + - match: airflow.dag_processor_heartbeat + match_type: regex + name: "airflow_dag_processor_heartbeat" + labels: + type: counter + + - match: airflow.dag.*.*.duration + name: "airflow_task_duration" + labels: + dag_id: "$1" + task_id: "$2" + + - match: airflow.dagrun.duration.success.* + name: "airflow_dagrun_duration" + labels: + dag_id: "$1" + + - match: airflow.dagrun.duration.failed.* + name: "airflow_dagrun_failed" + labels: + dag_id: "$1" + + - match: airflow.dagrun.schedule_delay.* + name: "airflow_dagrun_schedule_delay" + labels: + dag_id: "$1" + + - match: airflow.dag_processing.last_runtime.* + name: "airflow_dag_processing_last_runtime" + labels: + dag_file: "$1" + + - match: airflow.dag_processing.last_run.seconds_ago.* + name: "airflow_dag_processing_last_run_seconds_ago" + labels: + dag_file: "$1" + + - match: airflow.pool.open_slots.* + name: "airflow_pool_open_slots" + labels: + pool: "$1" + + - match: airflow.pool.used_slots.* + name: "airflow_pool_used_slots" + labels: + pool: "$1" + + - match: airflow.pool.starving_tasks.* + name: "airflow_pool_starving_tasks" + labels: + pool: "$1" + + - match: airflow.executor.open_slots.* + name: "airflow_executor_open_slots" + labels: + executor: "$1" + + - match: airflow.executor.queued_tasks.* + name: "airflow_executor_queued_tasks" + labels: + executor: "$1" + + - match: airflow.executor.running_tasks.* + name: "airflow_executor_running_tasks" + labels: + executor: "$1" + {{- 
end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/metrics/deployment.yaml b/addons/airflow/3/chart/airflow-3/templates/metrics/deployment.yaml new file mode 100644 index 00000000..3a7dafb2 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/metrics/deployment.yaml @@ -0,0 +1,143 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "airflow.metrics.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.podLabels .Values.commonLabels $versionLabel ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: metrics + template: + metadata: + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/metrics/configmap.yaml") . 
| sha256sum }} + {{- if .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: metrics + spec: + {{- include "airflow.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.metrics.automountServiceAccountToken }} + {{- if .Values.metrics.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAffinityPreset "component" "metrics" "customLabels" $podLabels "topologyKey" .Values.metrics.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAntiAffinityPreset "component" "metrics" "customLabels" $podLabels "topologyKey" .Values.metrics.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.nodeAffinityPreset.type "key" .Values.metrics.nodeAffinityPreset.key "values" .Values.metrics.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.metrics.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.metrics.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.tolerations "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.metrics.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.priorityClassName }} + priorityClassName: {{ .Values.metrics.priorityClassName }} + {{- end }} + {{- if .Values.metrics.schedulerName }} + schedulerName: {{ .Values.metrics.schedulerName }} + {{- end }} + serviceAccountName: {{ include "airflow.serviceAccountName" . }} + {{- if .Values.metrics.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + containers: + - name: statsd-exporter + image: {{ include "airflow.metrics.image" . | quote }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + args: + - "--statsd.mapping-config=/config/mappings.yml" + {{- end }} + {{- if .Values.metrics.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | trim | nindent 12 
}} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + - name: ingest + containerPort: {{ .Values.metrics.containerPorts.ingest }} + protocol: UDP + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- else if ne .Values.metrics.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.metrics.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: configuration + mountPath: /config/mappings.yml + subPath: mappings.yml + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + 
configMap: + name: {{ include "airflow.metrics.configMapName" . }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/metrics/networkpolicy.yaml b/addons/airflow/3/chart/airflow-3/templates/metrics/networkpolicy.yaml new file mode 100644 index 00000000..a278e860 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/metrics/networkpolicy.yaml @@ -0,0 +1,68 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "airflow.metrics.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: metrics + policyTypes: + - Ingress + - Egress + {{- if .Values.metrics.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + {{- if .Values.metrics.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.metrics.containerPorts.ingest }} + protocol: UDP + - port: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.metrics.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-client" (include "airflow.metrics.fullname" .) }}: "true" + {{- if .Values.metrics.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.metrics.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.metrics.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.metrics.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/metrics/service.yaml b/addons/airflow/3/chart/airflow-3/templates/metrics/service.yaml new file mode 100644 index 00000000..c4b1dacd --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/metrics/service.yaml @@ -0,0 +1,38 @@ +{{- /* +Copyright Broadcom, Inc. 
All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "airflow.metrics.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: udp-ingest + port: {{ .Values.metrics.service.ports.ingest }} + protocol: UDP + targetPort: ingest + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + protocol: TCP + targetPort: metrics + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/metrics/servicemonitor.yaml b/addons/airflow/3/chart/airflow-3/templates/metrics/servicemonitor.yaml new file mode 100644 index 00000000..7e726628 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/metrics/servicemonitor.yaml @@ -0,0 +1,50 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "airflow.metrics.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.metrics.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels $versionLabel ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: /metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/rbac/role.yaml b/addons/airflow/3/chart/airflow-3/templates/rbac/role.yaml new file mode 100644 index 00000000..6eabe13a --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/rbac/role.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - "pods" + verbs: + - "create" + - "list" + - "get" + - "watch" + - "delete" + - "patch" + - apiGroups: + - "" + resources: + - "pods/log" + verbs: + - "get" + - apiGroups: + - "" + resources: + - "pods/exec" + verbs: + - "create" + - "get" + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} + diff --git a/addons/airflow/3/chart/airflow-3/templates/rbac/rolebinding.yaml b/addons/airflow/3/chart/airflow-3/templates/rbac/rolebinding.yaml new file mode 100644 index 00000000..b552beff --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/rbac/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "common.names.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "airflow.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/rbac/serviceaccount.yaml b/addons/airflow/3/chart/airflow-3/templates/rbac/serviceaccount.yaml new file mode 100644 index 00000000..04f1d446 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/rbac/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "airflow.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/scheduler/deployment.yaml b/addons/airflow/3/chart/airflow-3/templates/scheduler/deployment.yaml new file mode 100644 index 00000000..4d188c5e --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/scheduler/deployment.yaml @@ -0,0 +1,312 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $kube := (contains "KubernetesExecutor" .Values.executor) -}} +{{- $isStatefulset := (eq "true" (include "airflow.scheduler.requiresStatefulset" .)) -}} +apiVersion: {{ ternary (include "common.capabilities.statefulset.apiVersion" .) (include "common.capabilities.deployment.apiVersion" .) $isStatefulset }} +kind: {{ ternary "StatefulSet" "Deployment" $isStatefulset }} +metadata: + name: {{ template "airflow.scheduler.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: scheduler + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | trim | nindent 4 }} + {{- end }} +spec: + {{- if $isStatefulset }} + serviceName: {{ include "airflow.scheduler.serviceName" . }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.scheduler.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: scheduler + {{- if not .Values.scheduler.autoscaling.hpa.enabled }} + replicas: {{ .Values.scheduler.replicaCount }} + {{- end }} + {{- if .Values.scheduler.updateStrategy }} + {{ ternary "updateStrategy" "strategy" $isStatefulset }}: {{- toYaml .Values.scheduler.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: scheduler + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/config/configmap.yaml") . | sha256sum }} + {{- if .Values.scheduler.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "airflow.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.scheduler.automountServiceAccountToken }} + {{- if .Values.scheduler.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.scheduler.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.scheduler.podAffinityPreset "component" "scheduler" "customLabels" $podLabels "topologyKey" .Values.scheduler.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.scheduler.podAntiAffinityPreset "component" "scheduler" "customLabels" $podLabels "topologyKey" .Values.scheduler.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" 
.Values.scheduler.nodeAffinityPreset.type "key" .Values.scheduler.nodeAffinityPreset.key "values" .Values.scheduler.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.scheduler.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.scheduler.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.scheduler.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.scheduler.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.scheduler.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.scheduler.priorityClassName }} + priorityClassName: {{ .Values.scheduler.priorityClassName | quote }} + {{- end }} + {{- if .Values.scheduler.schedulerName }} + schedulerName: {{ .Values.scheduler.schedulerName }} + {{- end }} + serviceAccountName: {{ include "airflow.serviceAccountName" . }} + {{- if .Values.scheduler.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.scheduler.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- include "airflow.defaultInitContainers.prepareConfig" . | nindent 8 }} + {{- if and .Values.dags.enabled }} + {{- include "airflow.defaultInitContainers.loadDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultInitContainers.loadPlugins" . | nindent 8 }} + {{- end }} + {{- include "airflow.defaultInitContainers.waitForDBMigrations" . 
| nindent 8 }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.scheduler.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: airflow-scheduler + image: {{ include "airflow.image" . | quote }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.scheduler.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.scheduler.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.scheduler.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.scheduler.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- include "airflow.configure.airflow.common" . | nindent 12 }} + - name: AIRFLOW_COMPONENT_TYPE + value: "scheduler" + - name: AIRFLOW_EXECUTOR + value: {{ .Values.executor }} + - name: PYTHONPYCACHEPREFIX + value: "/opt/drycc/airflow/venv/tmp" + {{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} + {{- include "airflow.configure.redis" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.scheduler.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.scheduler.extraEnvVarsCM .Values.scheduler.extraEnvVarsSecret .Values.scheduler.extraEnvVarsSecret .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.extraEnvVarsSecrets }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.scheduler.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.scheduler.extraEnvVarsCM }} + {{- end }} + {{- if .Values.scheduler.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.scheduler.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.extraEnvVarsSecrets }} + {{- range .Values.extraEnvVarsSecrets }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.scheduler.extraEnvVarsSecrets }} + {{- range .Values.scheduler.extraEnvVarsSecrets }} + - secretRef: + name: {{ . 
}} + {{- end }} + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.scheduler.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.customLivenessProbe "context" $) | trim | nindent 12 }} + {{- else if .Values.scheduler.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.scheduler.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - pgrep + - -f + - "airflow scheduler" + {{- end }} + {{- if .Values.scheduler.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.customReadinessProbe "context" $) | trim | nindent 12 }} + {{- else if .Values.scheduler.readinessProbe.enabled }} + {{- $readinessTimeout := sub (int .Values.scheduler.readinessProbe.timeoutSeconds) 1 }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.scheduler.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /bin/bash + - -ec + - | + export CONNECTION_CHECK_MAX_COUNT=0 + timeout {{ $readinessTimeout }} airflow jobs check --job-type SchedulerJob --local --limit 0 {{- if not .Values.diagnosticMode.enabled }} 2>/dev/null {{- end }} + {{- end }} + {{- if .Values.scheduler.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.scheduler.startupProbe.enabled }} + {{- $startupTimeout := sub (int .Values.scheduler.startupProbe.timeoutSeconds) 1 }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.scheduler.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /bin/bash + - -ec + - | + export CONNECTION_CHECK_MAX_COUNT=0 + timeout {{ $startupTimeout }} airflow jobs check --job-type SchedulerJob --local --limit 0 {{- if 
not .Values.diagnosticMode.enabled }} 2>/dev/null {{- end }} + {{- end }} + {{- end }} + {{- if .Values.scheduler.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.scheduler.resources }} + resources: {{- toYaml .Values.scheduler.resources | nindent 12 }} + {{- else if ne .Values.scheduler.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.scheduler.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: app-base-dir/airflow.cfg + - name: empty-dir + mountPath: /opt/drycc/airflow/config/airflow_local_settings.py + subPath: app-conf-dir/airflow_local_settings.py + {{- if .Values.requirements }} + - name: req-config + mountPath: /drycc/python/requirements.txt + subPath: requirements.txt + {{- end }} + {{- if .Values.pip }} + - name: req-config + mountPath: /opt/drycc/.config/pip/pip.conf + subPath: pip.conf + {{- end }} + {{- if and .Values.web.containerSecurityContext.enabled .Values.web.containerSecurityContext.readOnlyRootFilesystem }} + - name: empty-dir + mountPath: /opt/drycc/airflow/venv/tmp + subPath: app-pyc-cache-dir + {{- end }} + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} + {{- if $kube }} + - name: pod-template + mountPath: /opt/drycc/airflow/config/pod_template.yaml + subPath: pod_template.yaml + {{- end }} + {{- if and 
.Values.dags.enabled }} + {{- include "airflow.dags.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.scheduler.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and .Values.dags.enabled }} + {{- include "airflow.defaultSidecars.syncDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultSidecars.syncPlugins" . | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.scheduler.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + configMap: + name: {{ include "airflow.configMapName" . }} + optional: true + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + projected: + sources: + - secret: + name: {{ include "airflow.secretName" . }} + - secret: + name: {{ include "airflow.database.secretName" . }} + {{- end }} + {{- if $kube }} + - name: pod-template + configMap: + name: {{ printf "%s-pod-template" (include "common.names.fullname" .) }} + {{- end }} + {{- if and .Values.dags.enabled }} + {{- include "airflow.dags.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumes" . 
| nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.scheduler.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.scheduler.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if or .Values.requirements .Values.pip }} + - name: req-config + configMap: + name: {{ include "common.names.fullname" . }}-req + {{- end }} \ No newline at end of file diff --git a/addons/airflow/3/chart/airflow-3/templates/scheduler/hpa.yaml b/addons/airflow/3/chart/airflow-3/templates/scheduler/hpa.yaml new file mode 100644 index 00000000..4e9f0667 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/scheduler/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.scheduler.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "airflow.scheduler.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: scheduler + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "airflow.scheduler.fullname" . 
}} + minReplicas: {{ .Values.scheduler.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.scheduler.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.scheduler.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.scheduler.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.scheduler.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.scheduler.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/scheduler/networkpolicy.yaml b/addons/airflow/3/chart/airflow-3/templates/scheduler/networkpolicy.yaml new file mode 100644 index 00000000..625f3d71 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/scheduler/networkpolicy.yaml @@ -0,0 +1,77 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.scheduler.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "airflow.scheduler.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: scheduler + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.scheduler.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: scheduler + policyTypes: + - Ingress + - Egress + {{- if .Values.scheduler.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.http }} + - port: {{ .Values.worker.containerPorts.http }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.service.ports.ingest }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.scheduler.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.scheduler.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.service.ports.http }} + - port: {{ .Values.worker.containerPorts.http }} + {{- if not .Values.scheduler.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-scheduler" (include "common.names.fullname" .) 
}}-client: "true" + {{- if .Values.scheduler.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.scheduler.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.scheduler.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.scheduler.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.scheduler.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.scheduler.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/scheduler/poddisruptionbudget.yaml b/addons/airflow/3/chart/airflow-3/templates/scheduler/poddisruptionbudget.yaml new file mode 100644 index 00000000..c3e701c2 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/scheduler/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.scheduler.pdb.create }} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ template "airflow.scheduler.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: scheduler +spec: + {{- if .Values.scheduler.pdb.minAvailable }} + minAvailable: {{ .Values.scheduler.pdb.minAvailable }} + {{- end }} + {{- if or .Values.scheduler.pdb.maxUnavailable ( not .Values.scheduler.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.scheduler.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.scheduler.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: scheduler +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/scheduler/service-headless.yaml b/addons/airflow/3/chart/airflow-3/templates/scheduler/service-headless.yaml new file mode 100644 index 00000000..8e8c5504 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/scheduler/service-headless.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "airflow.scheduler.requiresStatefulset" .) -}} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "airflow.scheduler.serviceName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: scheduler + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - name: task-logs + port: {{ .Values.worker.containerPorts.http }} + targetPort: task-logs + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.scheduler.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: scheduler +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/scheduler/vpa.yaml b/addons/airflow/3/chart/airflow-3/templates/scheduler/vpa.yaml new file mode 100644 index 00000000..b10b4f98 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/scheduler/vpa.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.scheduler.autoscaling.vpa.enabled }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "airflow.scheduler.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: scheduler + {{- if or .Values.scheduler.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.scheduler.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: airflow-scheduler + {{- with .Values.scheduler.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.scheduler.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.scheduler.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "airflow.scheduler.fullname" . }} + {{- if .Values.scheduler.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.scheduler.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml b/addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml new file mode 100644 index 00000000..2bff7cc8 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml @@ -0,0 +1,199 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.setupDBJob.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ printf "%s-setup-db" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: setup-db + {{- $defaultAnnotations := dict "helm.sh/hook" "post-install,post-upgrade" "helm.sh/hook-delete-policy" "before-hook-creation,hook-succeeded" }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.setupDBJob.annotations .Values.commonAnnotations $defaultAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +spec: + backoffLimit: {{ .Values.setupDBJob.backoffLimit }} + template: + metadata: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.setupDBJob.podLabels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: setup-db + {{- if .Values.setupDBJob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "airflow.imagePullSecrets" . | nindent 6 }} + restartPolicy: OnFailure + {{- if .Values.setupDBJob.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.setupDBJob.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + automountServiceAccountToken: {{ .Values.setupDBJob.automountServiceAccountToken }} + serviceAccountName: {{ include "airflow.serviceAccountName" . 
}} + {{- if .Values.setupDBJob.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.setupDBJob.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.setupDBJob.podAffinityPreset "component" "setup-db" "customLabels" $podLabels "topologyKey" .Values.setupDBJob.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.setupDBJob.podAntiAffinityPreset "component" "setup-db" "customLabels" $podLabels "topologyKey" .Values.setupDBJob.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.setupDBJob.nodeAffinityPreset.type "key" .Values.setupDBJob.nodeAffinityPreset.key "values" .Values.setupDBJob.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.setupDBJob.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.setupDBJob.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.setupDBJob.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.setupDBJob.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.setupDBJob.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.setupDBJob.priorityClassName }} + priorityClassName: {{ .Values.setupDBJob.priorityClassName | quote }} + {{- end }} + {{- if .Values.setupDBJob.schedulerName }} + schedulerName: {{ .Values.setupDBJob.schedulerName }} + {{- end }} + initContainers: + {{- include "airflow.defaultInitContainers.prepareConfig" . | nindent 8 }} + {{- if .Values.setupDBJob.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: setup-db + image: {{ include "airflow.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.setupDBJob.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.setupDBJob.containerSecurityContext "context" .) | nindent 12 }} + {{- end }} + {{- if .Values.setupDBJob.resources }} + resources: {{- toYaml .Values.setupDBJob.resources | nindent 12 }} + {{- else if ne .Values.setupDBJob.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.setupDBJob.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if .Values.setupDBJob.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.command "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - /bin/bash + {{- end }} + {{- if .Values.setupDBJob.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ec + - | + . /opt/drycc/scripts/airflow-env.sh + . /opt/drycc/scripts/libairflow.sh + + info "Trying to connect to the database server" + airflow_wait_for_db_connection + + if ! airflow_execute db check-migrations; then + info "Populating database" + {{- if (include "airflow.isImageMajorVersion2" .) 
}} + airflow_execute db init + {{- else }} + airflow_execute db migrate + {{- end }} + + airflow_create_admin_user + info "Synchronizing internal metadata" + airflow_execute sync-perm --include-dags + true + else + info "Upgrading database schema" + {{- if (include "airflow.isImageMajorVersion2" .) }} + airflow_execute db upgrade + {{- else }} + airflow_execute db migrate + {{- end }} + true + fi + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: AIRFLOW_USERNAME + value: {{ .Values.auth.username }} + {{- if .Values.usePasswordFiles }} + - name: AIRFLOW_PASSWORD_FILE + value: "/opt/drycc/airflow/secrets/airflow-password" + {{- else }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "airflow.secretName" . }} + key: airflow-password + {{- end}} + {{- if .Values.setupDBJob.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.setupDBJob.extraEnvVarsCM .Values.setupDBJob.extraEnvVarsSecret }} + envFrom: + {{- if .Values.setupDBJob.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.setupDBJob.extraEnvVarsCM }} + {{- end }} + {{- if .Values.setupDBJob.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.setupDBJob.extraEnvVarsSecret }} + {{- end }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: app-base-dir/airflow.cfg + - name: empty-dir + mountPath: /opt/drycc/airflow/config/airflow_local_settings.py + subPath: app-conf-dir/airflow_local_settings.py + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + mountPath: 
/opt/drycc/airflow/secrets + {{- end }} + {{- if .Values.setupDBJob.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + configMap: + name: {{ include "airflow.configMapName" . }} + optional: true + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + projected: + sources: + - secret: + name: {{ include "airflow.secretName" . }} + - secret: + name: {{ include "airflow.database.secretName" . }} + {{- end }} + {{- if .Values.setupDBJob.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.setupDBJob.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/triggerer/hpa.yaml b/addons/airflow/3/chart/airflow-3/templates/triggerer/hpa.yaml new file mode 100644 index 00000000..bde2e13f --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/triggerer/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.triggerer.enabled .Values.triggerer.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "airflow.triggerer.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: triggerer + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ template "airflow.triggerer.fullname" . }} + minReplicas: {{ .Values.triggerer.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.triggerer.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.triggerer.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.triggerer.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.triggerer.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.triggerer.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/triggerer/networkpolicy.yaml b/addons/airflow/3/chart/airflow-3/templates/triggerer/networkpolicy.yaml new file mode 100644 index 00000000..2ed69e0b --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/triggerer/networkpolicy.yaml @@ -0,0 +1,75 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.triggerer.enabled .Values.triggerer.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "airflow.triggerer.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: triggerer + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.triggerer.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: triggerer + policyTypes: + - Ingress + - Egress + {{- if .Values.triggerer.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.http }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.service.ports.ingest }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.triggerer.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.triggerer.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.triggerer.containerPorts.logs }} + {{- if not .Values.triggerer.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-triggerer" (include "common.names.fullname" .) 
}}-client: "true" + {{- if .Values.triggerer.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.triggerer.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.triggerer.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.triggerer.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.triggerer.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.triggerer.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/triggerer/poddisruptionbudget.yaml b/addons/airflow/3/chart/airflow-3/templates/triggerer/poddisruptionbudget.yaml new file mode 100644 index 00000000..be95f3ba --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/triggerer/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.triggerer.enabled .Values.triggerer.pdb.create }} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ template "airflow.triggerer.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: triggerer +spec: + {{- if .Values.triggerer.pdb.minAvailable }} + minAvailable: {{ .Values.triggerer.pdb.minAvailable }} + {{- end }} + {{- if or .Values.triggerer.pdb.maxUnavailable ( not .Values.triggerer.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.triggerer.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.triggerer.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: triggerer +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/triggerer/service.yaml b/addons/airflow/3/chart/airflow-3/templates/triggerer/service.yaml new file mode 100644 index 00000000..56b8797c --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/triggerer/service.yaml @@ -0,0 +1,54 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.triggerer.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "airflow.triggerer.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: triggerer + {{- if or .Values.triggerer.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.triggerer.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.triggerer.service.type }} + {{- if .Values.triggerer.service.sessionAffinity }} + sessionAffinity: {{ .Values.triggerer.service.sessionAffinity }} + {{- end }} + {{- if .Values.triggerer.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if and .Values.triggerer.service.clusterIP (eq .Values.triggerer.service.type "ClusterIP") }} + clusterIP: {{ .Values.triggerer.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.triggerer.service.type "LoadBalancer") (eq .Values.triggerer.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.triggerer.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.triggerer.service.type "LoadBalancer") (not (empty .Values.triggerer.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.triggerer.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.triggerer.service.type "LoadBalancer") (not (empty .Values.triggerer.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.triggerer.service.loadBalancerIP }} + {{- end }} + ports: + - name: logs + port: {{ .Values.triggerer.service.ports.logs }} + protocol: TCP + targetPort: logs + {{- if (and (or (eq .Values.triggerer.service.type "NodePort") (eq .Values.triggerer.service.type "LoadBalancer")) (not (empty .Values.triggerer.service.nodePorts.logs))) }} + nodePort: {{ .Values.triggerer.service.nodePorts.logs }} + {{- else if eq .Values.triggerer.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.triggerer.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include 
"common.tplvalues.merge" ( dict "values" ( list .Values.triggerer.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: triggerer +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/triggerer/statefulset.yaml b/addons/airflow/3/chart/airflow-3/templates/triggerer/statefulset.yaml new file mode 100644 index 00000000..28b00359 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/triggerer/statefulset.yaml @@ -0,0 +1,324 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.triggerer.enabled }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "airflow.triggerer.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: triggerer + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | trim | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.triggerer.autoscaling.hpa.enabled }} + replicas: {{ .Values.triggerer.replicaCount }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.triggerer.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: triggerer + {{- if .Values.triggerer.podManagementPolicy }} + podManagementPolicy: {{ .Values.triggerer.podManagementPolicy | quote }} + {{- end }} + serviceName: {{ template "airflow.triggerer.fullname" . 
}} + {{- if .Values.triggerer.updateStrategy }} + updateStrategy: {{- toYaml .Values.triggerer.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: triggerer + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/config/configmap.yaml") . | sha256sum }} + {{- if .Values.triggerer.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "airflow.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.triggerer.automountServiceAccountToken }} + {{- if .Values.triggerer.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.triggerer.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.triggerer.podAffinityPreset "component" "triggerer" "customLabels" $podLabels "topologyKey" .Values.triggerer.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.triggerer.podAntiAffinityPreset "component" "triggerer" "customLabels" $podLabels "topologyKey" .Values.triggerer.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.triggerer.nodeAffinityPreset.type "key" .Values.triggerer.nodeAffinityPreset.key "values" .Values.triggerer.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.triggerer.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.nodeSelector "context" $) | nindent 8 }} + {{- end }} + 
{{- if .Values.triggerer.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.triggerer.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.triggerer.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.triggerer.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.triggerer.priorityClassName }} + priorityClassName: {{ .Values.triggerer.priorityClassName | quote }} + {{- end }} + {{- if .Values.triggerer.schedulerName }} + schedulerName: {{ .Values.triggerer.schedulerName }} + {{- end }} + serviceAccountName: {{ include "airflow.serviceAccountName" . }} + {{- if .Values.triggerer.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.triggerer.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- include "airflow.defaultInitContainers.prepareConfig" . | nindent 8 }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultInitContainers.loadDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultInitContainers.loadPlugins" . | nindent 8 }} + {{- end }} + {{- include "airflow.defaultInitContainers.waitForDBMigrations" . | nindent 8 }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.triggerer.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: airflow-triggerer + image: {{ include "airflow.image" . 
| quote }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.triggerer.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.triggerer.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.triggerer.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.triggerer.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- include "airflow.configure.airflow.common" . | nindent 12 }} + - name: AIRFLOW_COMPONENT_TYPE + value: "triggerer" + - name: AIRFLOW_EXECUTOR + value: {{ .Values.executor }} + - name: PYTHONPYCACHEPREFIX + value: "/opt/drycc/airflow/venv/tmp" + {{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} + {{- include "airflow.configure.redis" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.triggerer.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.triggerer.extraEnvVarsCM .Values.triggerer.extraEnvVarsSecret .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.triggerer.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.triggerer.extraEnvVarsCM }} + {{- end }} + {{- if .Values.triggerer.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.triggerer.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: logs + containerPort: {{ .Values.triggerer.containerPorts.logs }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.triggerer.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.customLivenessProbe "context" $) | trim | nindent 12 }} + {{- else if .Values.triggerer.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.triggerer.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - pgrep + - -f + - "airflow triggerer" + {{- end }} + {{- if .Values.triggerer.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.customReadinessProbe "context" $) | trim | nindent 12 }} + {{- else if .Values.triggerer.readinessProbe.enabled }} + {{- $readinessTimeout := sub (int .Values.triggerer.readinessProbe.timeoutSeconds) 1 }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit 
.Values.triggerer.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /bin/bash + - -ec + - | + export CONNECTION_CHECK_MAX_COUNT=0 + timeout {{ $readinessTimeout }} airflow jobs check --job-type TriggererJob --local --limit 0 {{- if not .Values.diagnosticMode.enabled }} 2>/dev/null {{- end }} + {{- end }} + {{- if .Values.triggerer.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.triggerer.startupProbe.enabled }} + {{- $startupTimeout := sub (int .Values.triggerer.startupProbe.timeoutSeconds) 1 }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.triggerer.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /bin/bash + - -ec + - | + export CONNECTION_CHECK_MAX_COUNT=0 + timeout {{ $startupTimeout }} airflow jobs check --job-type TriggererJob --local --limit 0 {{- if not .Values.diagnosticMode.enabled }} 2>/dev/null {{- end }} + {{- end }} + {{- end }} + {{- if .Values.triggerer.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.triggerer.resources }} + resources: {{- toYaml .Values.triggerer.resources | nindent 12 }} + {{- else if ne .Values.triggerer.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.triggerer.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + 
subPath: app-base-dir/airflow.cfg + - name: empty-dir + mountPath: /opt/drycc/airflow/config/airflow_local_settings.py + subPath: app-conf-dir/airflow_local_settings.py + {{- if and .Values.web.containerSecurityContext.enabled .Values.web.containerSecurityContext.readOnlyRootFilesystem }} + - name: empty-dir + mountPath: /opt/drycc/airflow/venv/tmp + subPath: app-pyc-cache-dir + {{- end }} + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} + {{- if .Values.triggerer.persistence.enabled }} + - name: logs + mountPath: /opt/drycc/airflow/logs + {{- else }} + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.triggerer.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultSidecars.syncDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultSidecars.syncPlugins" . | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.triggerer.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + configMap: + name: {{ include "airflow.configMapName" . 
}} + optional: true + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + projected: + sources: + - secret: + name: {{ include "airflow.secretName" . }} + - secret: + name: {{ include "airflow.database.secretName" . }} + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.triggerer.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if or (not .Values.triggerer.persistence.enabled) (not (empty .Values.triggerer.persistence.existingClaim)) }} + - name: logs + {{- if not (empty .Values.triggerer.persistence.existingClaim) }} + persistentVolumeClaim: + claimName: {{ tpl .Values.triggerer.persistence.existingClaim . }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + {{- if .Values.triggerer.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.triggerer.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.triggerer.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: logs + {{- if or .Values.triggerer.persistence.annotations .Values.commonAnnotations }} + {{- $claimAnnotations := include "common.tplvalues.merge" (dict "values" ( list .Values.triggerer.persistence.annotations .Values.commonAnnotations ) "context" .) 
| fromYaml }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.triggerer.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.triggerer.persistence.size | quote }} + {{- if .Values.triggerer.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.triggerer.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.triggerer.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.triggerer.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/triggerer/vpa.yaml b/addons/airflow/3/chart/airflow-3/templates/triggerer/vpa.yaml new file mode 100644 index 00000000..0ab0eaf0 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/triggerer/vpa.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.triggerer.enabled (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.triggerer.autoscaling.vpa.enabled }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "airflow.triggerer.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: triggerer + {{- if or .Values.triggerer.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.triggerer.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: airflow-triggerer + {{- with .Values.triggerer.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.triggerer.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.triggerer.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ template "airflow.triggerer.fullname" . }} + {{- if .Values.triggerer.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.triggerer.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/certs.yaml b/addons/airflow/3/chart/airflow-3/templates/web/certs.yaml new file mode 100644 index 00000000..62bb377d --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/certs.yaml @@ -0,0 +1,86 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.web.tls.enabled .Values.web.tls.autoGenerated.enabled (eq .Values.web.tls.autoGenerated.engine "cert-manager") }} +{{- if empty .Values.web.tls.autoGenerated.certManager.existingIssuer }} +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ printf "%s-clusterissuer" (include "airflow.web.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selfSigned: {} +--- +{{- end }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ printf "%s-ca-crt" (include "airflow.web.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + secretName: {{ printf "%s-ca-crt" (include "airflow.web.fullname" .) }} + commonName: {{ printf "%s-root-ca" (include "airflow.web.fullname" .) }} + isCA: true + issuerRef: + name: {{ default (printf "%s-clusterissuer" (include "airflow.web.fullname" .)) .Values.web.tls.autoGenerated.certManager.existingIssuer }} + kind: {{ default "Issuer" .Values.web.tls.autoGenerated.certManager.existingIssuerKind }} +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ printf "%s-ca-issuer" (include "airflow.web.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + ca: + secretName: {{ printf "%s-ca-crt" (include "airflow.web.fullname" .) }} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ printf "%s-crt" (include "airflow.web.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + secretName: {{ template "airflow.web.tls.secretName" . }} + commonName: {{ printf "%s.%s.svc.%s" (include "airflow.web.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain }} + issuerRef: + name: {{ printf "%s-ca-issuer" (include "airflow.web.fullname" .) }} + kind: Issuer + subject: + organizations: + - "Airflow" + dnsNames: + - '*.{{ include "common.names.namespace" . }}' + - '*.{{ include "common.names.namespace" . }}.svc' + - '*.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}' + - '*.{{ include "airflow.web.fullname" . }}' + - '*.{{ include "airflow.web.fullname" . }}.{{ include "common.names.namespace" . }}' + - '*.{{ include "airflow.web.fullname" . }}.{{ include "common.names.namespace" . }}.svc' + - '*.{{ include "airflow.web.fullname" . }}.{{ include "common.names.namespace" . 
}}.svc.{{ .Values.clusterDomain }}' + privateKey: + algorithm: {{ .Values.web.tls.autoGenerated.certManager.keyAlgorithm }} + size: {{ int .Values.web.tls.autoGenerated.certManager.keySize }} + duration: {{ .Values.web.tls.autoGenerated.certManager.duration }} + renewBefore: {{ .Values.web.tls.autoGenerated.certManager.renewBefore }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/configmap.yaml b/addons/airflow/3/chart/airflow-3/templates/web/configmap.yaml new file mode 100644 index 00000000..530f0bbb --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/configmap.yaml @@ -0,0 +1,63 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.web.existingConfigmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "airflow.web.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +data: + webserver_config.py: |- + {{- if .Values.web.configuration }} + {{ include "common.tplvalues.render" (dict "value" .Values.web.configuration "context" .) 
| nindent 4 }} + {{- else }} + """Configuration for the Airflow webserver""" + + from __future__ import annotations + + import os + + {{- if .Values.ldap.enabled }} + from airflow.www.fab_security.manager import AUTH_LDAP + + AUTH_TYPE = AUTH_LDAP + AUTH_LDAP_SERVER = {{ .Values.ldap.uri | squote }} + AUTH_LDAP_SEARCH = {{ .Values.ldap.basedn | squote }} + AUTH_LDAP_UID_FIELD = {{ .Values.ldap.searchAttribute | squote }} + AUTH_LDAP_BIND_USER = {{ .Values.ldap.binddn | squote }} + AUTH_USER_REGISTRATION = {{ .Values.ldap.userRegistration | squote }} + AUTH_USER_REGISTRATION_ROLE = {{ .Values.ldap.userRegistrationRole | squote }} + AUTH_ROLES_MAPPING = {{ .Values.ldap.rolesMapping }} + AUTH_ROLES_SYNC_AT_LOGIN = {{ .Values.ldap.rolesSyncAtLogin }} + {{- if .Values.ldap.tls.enabled }} + AUTH_LDAP_ALLOW_SELF_SIGNED = {{ ternary "True" "False" .Values.ldap.tls.allowSelfSigned }} + AUTH_LDAP_TLS_CACERTFILE = {{ printf "%s/%s" .Values.ldap.tls.certificatesMountPath .Values.ldap.tls.CAFilename | squote }} + {{- end }} + {{- else }} + {{- if (include "airflow.isImageMajorVersion2" .) }} + from airflow.www.fab_security.manager import AUTH_DB + {{- else }} + from flask_appbuilder.const import AUTH_DB + {{- end }} + + AUTH_TYPE = AUTH_DB + {{- end }} + + basedir = os.path.abspath(os.path.dirname(__file__)) + + # Flask-WTF flag for CSRF + WTF_CSRF_ENABLED = True + WTF_CSRF_TIME_LIMIT = None + {{- if .Values.web.extraConfiguration }} + {{ include "common.tplvalues.render" (dict "value" .Values.web.extraConfiguration "context" .) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/deployment.yaml b/addons/airflow/3/chart/airflow-3/templates/web/deployment.yaml new file mode 100644 index 00000000..ec2000d6 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/deployment.yaml @@ -0,0 +1,346 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "airflow.web.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.web.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: web + {{- if not .Values.web.autoscaling.hpa.enabled }} + replicas: {{ .Values.web.replicaCount }} + {{- end }} + {{- if .Values.web.updateStrategy }} + strategy: {{- toYaml .Values.web.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: web + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/config/configmap.yaml") . | sha256sum }} + checksum/webserver-configmap: {{ include (print $.Template.BasePath "/web/configmap.yaml") . | sha256sum }} + {{- if .Values.web.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.web.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "airflow.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.web.automountServiceAccountToken }} + {{- if .Values.web.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.web.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.web.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.web.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.web.podAffinityPreset "component" "web" "customLabels" $podLabels "topologyKey" .Values.web.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.web.podAntiAffinityPreset "component" "web" "customLabels" $podLabels "topologyKey" .Values.web.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.web.nodeAffinityPreset.type "key" .Values.web.nodeAffinityPreset.key "values" .Values.web.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.web.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.web.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.web.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.web.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.web.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.web.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.web.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.web.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.web.priorityClassName }} + priorityClassName: {{ .Values.web.priorityClassName | quote }} + {{- end }} + {{- if .Values.web.schedulerName }} + schedulerName: {{ .Values.web.schedulerName }} + {{- end }} + serviceAccountName: {{ include "airflow.serviceAccountName" . }} + {{- if .Values.web.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.web.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- include "airflow.defaultInitContainers.prepareConfig" . | nindent 8 }} + {{- include "airflow.defaultInitContainers.prepareWebConfig" . | nindent 8 }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultInitContainers.loadDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultInitContainers.loadPlugins" . | nindent 8 }} + {{- end }} + {{- if .Values.setupDBJob.enabled }} + {{- include "airflow.defaultInitContainers.waitForDBMigrations" . | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.web.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.web.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: airflow-web + image: {{ include "airflow.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.web.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.web.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.web.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.web.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.web.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.web.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- include "airflow.configure.airflow.common" . | nindent 12 }} + - name: AIRFLOW_SKIP_DB_SETUP + value: {{ ternary "yes" "no" .Values.setupDBJob.enabled | quote }} + {{- if not .Values.setupDBJob.enabled }} + - name: AIRFLOW_USERNAME + value: {{ .Values.auth.username }} + {{- if .Values.usePasswordFiles }} + - name: AIRFLOW_PASSWORD_FILE + value: "/opt/drycc/airflow/secrets/airflow-password" + {{- else }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "airflow.secretName" . }} + key: airflow-password + {{- end }} + {{- end }} + - name: PYTHONPYCACHEPREFIX + value: "/opt/drycc/airflow/venv/tmp" + {{- if (include "airflow.isImageMajorVersion2" .) 
}} + - name: AIRFLOW_COMPONENT_TYPE + value: "webserver" + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.web.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.web.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.web.extraEnvVarsCM .Values.web.extraEnvVarsSecret .Values.web.extraEnvVarsSecrets .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.extraEnvVarsSecrets }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.web.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.web.extraEnvVarsCM }} + {{- end }} + {{- if .Values.web.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.web.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.extraEnvVarsSecrets }} + {{- range .Values.extraEnvVarsSecrets }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.web.extraEnvVarsSecrets }} + {{- range .Values.web.extraEnvVarsSecrets }} + - secretRef: + name: {{ . 
}} + {{- end }} + {{- end }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.web.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.web.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.web.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.web.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.web.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http + {{- end }} + {{- if .Values.web.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.web.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.web.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.web.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - pgrep + - -f + - airflow + {{- end }} + {{- if .Values.web.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.web.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.web.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.web.readinessProbe "enabled") "context" $) | nindent 12 }} + {{- if contains "127.0.0.1" (include "airflow.baseUrl" .) }} + httpGet: + {{- if (include "airflow.isImageMajorVersion3" .) 
}} + path: /api/v2/monitor/health + {{- else }} + path: /health + {{- end }} + port: http + scheme: {{ ternary "HTTPS" "HTTP" .Values.web.tls.enabled }} + {{- else }} + tcpSocket: + port: http + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.web.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.web.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.web.resources }} + resources: {{- toYaml .Values.web.resources | nindent 12 }} + {{- else if ne .Values.web.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.web.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: app-base-dir/airflow.cfg + - name: empty-dir + mountPath: /opt/drycc/airflow/config/airflow_local_settings.py + subPath: app-conf-dir/airflow_local_settings.py + - name: empty-dir + mountPath: /opt/drycc/airflow/webserver_config.py + subPath: app-base-dir/webserver_config.py + {{- if .Values.requirements }} + - name: req-config + mountPath: /drycc/python/requirements.txt + subPath: requirements.txt + {{- end }} + {{- if .Values.pip }} + - name: req-config + mountPath: /opt/drycc/.config/pip/pip.conf + subPath: pip.conf + {{- end }} + {{- if and .Values.web.containerSecurityContext.enabled .Values.web.containerSecurityContext.readOnlyRootFilesystem }} + - name: empty-dir + mountPath: /opt/drycc/airflow/venv/tmp + subPath: app-pyc-cache-dir + {{- end }} + {{- if .Values.usePasswordFiles }} + - name: 
airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} + {{- if and .Values.ldap.enabled .Values.ldap.tls.enabled }} + - name: airflow-ldap-ca-certificate + mountPath: {{ .Values.ldap.tls.certificatesMountPath }} + readOnly: true + {{- end }} + {{- if .Values.web.tls.enabled }} + - name: tls-certificates + mountPath: /opt/drycc/airflow/certs + readOnly: true + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.web.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.web.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and .Values.dags.enabled }} + {{- include "airflow.defaultSidecars.syncDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultSidecars.syncPlugins" . | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.web.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.web.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + configMap: + name: {{ include "airflow.configMapName" . }} + optional: true + {{- if or .Values.requirements .Values.pip }} + - name: req-config + configMap: + name: {{ include "common.names.fullname" . }}-req + {{- end }} + - name: webserver-configuration + configMap: + name: {{ include "airflow.web.configMapName" . 
}} + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + projected: + sources: + - secret: + name: {{ include "airflow.secretName" . }} + - secret: + name: {{ include "airflow.database.secretName" . }} + {{- if .Values.ldap.enabled }} + - secret: + name: {{ include "airflow.ldap.secretName" . }} + {{- end }} + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumes" . | nindent 8 }} + {{- end }} + {{- if and .Values.ldap.enabled .Values.ldap.tls.enabled }} + - name: airflow-ldap-ca-certificate + secret: + secretName: {{ required "A secret containing the LDAP CA certificate. It is required when SSL in enabled" .Values.ldap.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if .Values.web.tls.enabled }} + - name: tls-certificates + secret: + secretName: {{ template "airflow.web.tls.secretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.web.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.web.extraVolumes "context" $) | nindent 8 }} + {{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/hpa.yaml b/addons/airflow/3/chart/airflow-3/templates/web/hpa.yaml new file mode 100644 index 00000000..98a03a57 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.web.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "airflow.web.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "airflow.web.fullname" . }} + minReplicas: {{ .Values.web.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.web.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.web.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.web.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.web.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.web.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/ingress-tls-secret.yaml b/addons/airflow/3/chart/airflow-3/templates/web/ingress-tls-secret.yaml new file mode 100644 index 00000000..cb0bbd8f --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/ingress-tls-secret.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "airflow-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/ingress.yaml b/addons/airflow/3/chart/airflow-3/templates/web/ingress.yaml new file mode 100644 index 00000000..17fa5256 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/ingress.yaml @@ -0,0 +1,55 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + pathType: {{ .Values.ingress.pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "airflow.web.fullname" .) "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + pathType: {{ default "ImplementationSpecific" .pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "airflow.web.fullname" $) "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + 
{{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/networkpolicy.yaml b/addons/airflow/3/chart/airflow-3/templates/web/networkpolicy.yaml new file mode 100644 index 00000000..5432754f --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/networkpolicy.yaml @@ -0,0 +1,77 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.web.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "airflow.web.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.web.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: web + policyTypes: + - Ingress + - Egress + {{- if .Values.web.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.web.containerPorts.http }} + - port: {{ .Values.service.ports.http }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.service.ports.ingest }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.web.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.web.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.web.containerPorts.http }} + - port: {{ .Values.service.ports.http }} + {{- if not .Values.web.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-web" (include "common.names.fullname" .) 
}}-client: "true" + {{- if .Values.web.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.web.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.web.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.web.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.web.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.web.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/poddisruptionbudget.yaml b/addons/airflow/3/chart/airflow-3/templates/web/poddisruptionbudget.yaml new file mode 100644 index 00000000..0a2bccb3 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.web.pdb.create }} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ template "airflow.web.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web +spec: + {{- if .Values.web.pdb.minAvailable }} + minAvailable: {{ .Values.web.pdb.minAvailable }} + {{- end }} + {{- if or .Values.web.pdb.maxUnavailable ( not .Values.web.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.web.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.web.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: web +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/service.yaml b/addons/airflow/3/chart/airflow-3/templates/web/service.yaml new file mode 100644 index 00000000..9d6615c3 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/service.yaml @@ -0,0 +1,52 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "airflow.web.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.ports.http }} + protocol: TCP + targetPort: http + {{- if (and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.http))) }} + nodePort: {{ .Values.service.nodePorts.http }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.web.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web diff --git a/addons/airflow/3/chart/airflow-3/templates/web/tls-secret.yaml b/addons/airflow/3/chart/airflow-3/templates/web/tls-secret.yaml new file mode 100644 index 00000000..22eecb8d --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/tls-secret.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.web.tls.enabled .Values.web.tls.autoGenerated.enabled (eq .Values.web.tls.autoGenerated.engine "helm") -}} +{{- $ca := genCA "airflow-web-ca" 365 }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "airflow.web.fullname" . }} +{{- $altNames := list (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +{{- $secretName := include "airflow.web.tls.secretName" . }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} +{{- else if and .Values.web.tls.enabled (not .Values.web.tls.autoGenerated.enabled) (empty .Values.web.tls.existingCASecret) (empty .Values.web.tls.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "airflow.web.tls.secretName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ required "A valid .Values.web.tls.ca entry required!" .Values.web.tls.ca | b64enc | quote }} + tls.crt: {{ required "A valid .Values.web.tls.cert entry required!" .Values.web.tls.cert | b64enc | quote }} + tls.key: {{ required "A valid .Values.web.tls.key entry required!" 
.Values.web.tls.key | b64enc | quote }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/web/vpa.yaml b/addons/airflow/3/chart/airflow-3/templates/web/vpa.yaml new file mode 100644 index 00000000..2c0828da --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/web/vpa.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.web.autoscaling.vpa.enabled }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "airflow.web.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: web + {{- if or .Values.web.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.web.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: airflow-web + {{- with .Values.web.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.web.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.web.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "airflow.web.fullname" . 
}} + {{- if .Values.web.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.web.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/hpa.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/hpa.yaml new file mode 100644 index 00000000..04dc75a2 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/worker/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor")) (coalesce .Values.worker.autoscaling.hpa.enabled .Values.worker.autoscaling.enabled) }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "airflow.worker.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: worker + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ template "airflow.worker.fullname" . 
}} + minReplicas: {{ coalesce .Values.worker.autoscaling.hpa.minReplicas .Values.worker.autoscaling.minReplicas }} + maxReplicas: {{ coalesce .Values.worker.autoscaling.hpa.maxReplicas .Values.worker.autoscaling.maxReplicas }} + metrics: + {{- if (coalesce .Values.worker.autoscaling.hpa.targetCPU .Values.worker.autoscaling.targetCPU) }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ coalesce .Values.worker.autoscaling.hpa.targetCPU .Values.worker.autoscaling.targetCPU }} + {{- end }} + {{- if (coalesce .Values.worker.autoscaling.hpa.targetMemory .Values.worker.autoscaling.targetMemory) }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ coalesce .Values.worker.autoscaling.hpa.targetMemory .Values.worker.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/networkpolicy.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/networkpolicy.yaml new file mode 100644 index 00000000..1f2f0010 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/worker/networkpolicy.yaml @@ -0,0 +1,77 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.worker.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "airflow.worker.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: worker + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: worker + policyTypes: + - Ingress + - Egress + {{- if .Values.worker.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.http }} + - port: {{ .Values.worker.containerPorts.http }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.service.ports.ingest }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.worker.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.worker.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.service.ports.http }} + - port: {{ .Values.worker.containerPorts.http }} + {{- if not .Values.worker.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-worker" (include "common.names.fullname" .) 
}}-client: "true" + {{- if .Values.worker.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.worker.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.worker.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.worker.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.worker.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.worker.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/poddisruptionbudget.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/poddisruptionbudget.yaml new file mode 100644 index 00000000..506f2646 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/worker/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.worker.pdb.create (or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor")) }} +kind: PodDisruptionBudget +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +metadata: + name: {{ template "airflow.worker.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: worker +spec: + {{- if .Values.worker.pdb.minAvailable }} + minAvailable: {{ .Values.worker.pdb.minAvailable }} + {{- end }} + {{- if or .Values.worker.pdb.maxUnavailable ( not .Values.worker.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.worker.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: worker +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/service-headless.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/service-headless.yaml new file mode 100644 index 00000000..efea2371 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/worker/service-headless.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-hl" (include "airflow.worker.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: worker + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: worker + port: {{ .Values.worker.containerPorts.http }} + targetPort: worker + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: worker diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml new file mode 100644 index 00000000..f1111160 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml @@ -0,0 +1,358 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "airflow.worker.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: worker + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.worker.podManagementPolicy }} + podManagementPolicy: {{ .Values.worker.podManagementPolicy | quote }} + {{- end }} + {{- if not (coalesce .Values.worker.autoscaling.hpa.enabled .Values.worker.autoscaling.enabled) }} + replicas: {{ .Values.worker.replicaCount }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: worker + serviceName: {{ printf "%s-hl" (include "airflow.worker.fullname" .) 
| trunc 63 | trimSuffix "-" }} + {{- if .Values.worker.updateStrategy }} + updateStrategy: {{- toYaml .Values.worker.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/config/configmap.yaml") . | sha256sum }} + checksum/webserver-configmap: {{ include (print $.Template.BasePath "/web/configmap.yaml") . | sha256sum }} + {{- if .Values.worker.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: worker + spec: + {{- include "airflow.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.worker.automountServiceAccountToken }} + {{- if .Values.worker.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.worker.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.worker.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.worker.podAffinityPreset "component" "worker" "customLabels" $podLabels "topologyKey" .Values.worker.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.worker.podAntiAffinityPreset "component" "worker" "customLabels" $podLabels "topologyKey" .Values.worker.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.worker.nodeAffinityPreset.type "key" .Values.worker.nodeAffinityPreset.key "values" .Values.worker.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.worker.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict 
"value" .Values.worker.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.worker.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.worker.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.worker.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.worker.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.worker.priorityClassName }} + priorityClassName: {{ .Values.worker.priorityClassName | quote }} + {{- end }} + {{- if .Values.worker.schedulerName }} + schedulerName: {{ .Values.worker.schedulerName }} + {{- end }} + serviceAccountName: {{ include "airflow.serviceAccountName" . }} + {{- if .Values.worker.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.worker.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- include "airflow.defaultInitContainers.prepareConfig" . | nindent 8 }} + {{- include "airflow.defaultInitContainers.prepareWebConfig" . | nindent 8 }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultInitContainers.loadDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultInitContainers.loadPlugins" . | nindent 8 }} + {{- end }} + {{- include "airflow.defaultInitContainers.waitForDBMigrations" . 
| nindent 8 }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.worker.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: airflow-worker + image: {{ include "airflow.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.worker.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.worker.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.worker.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.worker.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.worker.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.worker.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- include "airflow.configure.airflow.common" . | nindent 12 }} + - name: AIRFLOW_COMPONENT_TYPE + value: "worker" + - name: AIRFLOW_EXECUTOR + value: {{ include "airflow.worker.executor" . }} + - name: PYTHONPYCACHEPREFIX + value: "/opt/drycc/airflow/venv/tmp" + {{- if or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor") }} + {{- include "airflow.configure.redis" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.worker.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.worker.extraEnvVarsCM .Values.worker.extraEnvVarsSecret .Values.worker.extraEnvVarsSecrets .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.extraEnvVarsSecrets }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.worker.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.worker.extraEnvVarsCM }} + {{- end }} + {{- if .Values.worker.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.worker.extraEnvVarsSecret }} + {{- end }} + {{- if .Values.extraEnvVarsSecrets }} + {{- range .Values.extraEnvVarsSecrets }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.worker.extraEnvVarsSecrets }} + {{- range .Values.worker.extraEnvVarsSecrets }} + - secretRef: + name: {{ . 
}} + {{- end }} + {{- end }} + {{- end }} + ports: + - name: worker + containerPort: {{ .Values.worker.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.worker.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.worker.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.worker.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.worker.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: worker + {{- end }} + {{- if .Values.worker.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.worker.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.worker.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.worker.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: worker + {{- end }} + {{- if .Values.worker.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.worker.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.worker.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.worker.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - sh + - -c + - | + . 
/opt/drycc/airflow/venv/bin/activate && \ + CONNECTION_CHECK_MAX_COUNT=0 python -m celery --app airflow.providers.celery.executors.celery_executor.app inspect ping -d celery@$(hostname) + {{- end }} + {{- end }} + {{- if .Values.worker.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.worker.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.worker.resources }} + resources: {{- toYaml .Values.worker.resources | nindent 12 }} + {{- else if ne .Values.worker.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.worker.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/nss-wrapper + subPath: app-nss-wrapper-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.db + subPath: app-base-dir/airflow.db + - name: empty-dir + mountPath: /opt/drycc/airflow/airflow.cfg + subPath: app-base-dir/airflow.cfg + - name: empty-dir + mountPath: /opt/drycc/airflow/config/airflow_local_settings.py + subPath: app-conf-dir/airflow_local_settings.py + - name: empty-dir + mountPath: /opt/drycc/airflow/webserver_config.py + subPath: app-base-dir/webserver_config.py + {{- if .Values.requirements }} + - name: req-config + mountPath: /drycc/python/requirements.txt + subPath: requirements.txt + {{- end }} + {{- if .Values.pip }} + - name: req-config + mountPath: /opt/drycc/.config/pip/pip.conf + subPath: pip.conf + {{- end }} + {{- if and .Values.web.containerSecurityContext.enabled .Values.web.containerSecurityContext.readOnlyRootFilesystem }} + - name: empty-dir + mountPath: /opt/drycc/airflow/venv/tmp + subPath: app-pyc-cache-dir + {{- end }} + {{- if .Values.usePasswordFiles }} + - name: 
airflow-secrets + mountPath: /opt/drycc/airflow/secrets + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumeMounts" . | nindent 12 }} + {{- end }} + {{- if .Values.web.tls.enabled }} + - name: tls-certificates + mountPath: /opt/drycc/airflow/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.worker.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.defaultSidecars.syncDAGs" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.defaultSidecars.syncPlugins" . | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + {{- if .Values.worker.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.sidecars "context" $) | trim | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: configuration + configMap: + name: {{ include "airflow.configMapName" . }} + optional: true + - name: webserver-configuration + configMap: + name: {{ include "airflow.web.configMapName" . }} + {{- if or .Values.requirements .Values.pip }} + - name: req-config + configMap: + name: {{ include "common.names.fullname" . }}-req + {{- end }} + {{- if .Values.web.tls.enabled }} + - name: tls-certificates + secret: + secretName: {{ template "airflow.web.tls.secretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.usePasswordFiles }} + - name: airflow-secrets + projected: + sources: + - secret: + name: {{ include "airflow.secretName" . 
}} + - secret: + name: {{ include "airflow.database.secretName" . }} + {{- if .Values.ldap.enabled }} + - secret: + name: {{ include "airflow.ldap.secretName" . }} + {{- end }} + {{- end }} + {{- if .Values.dags.enabled }} + {{- include "airflow.dags.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.plugins.enabled }} + {{- include "airflow.plugins.volumes" . | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.worker.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.worker.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.worker.persistence.enabled }} + - name: "data" + emptyDir: {} + {{- else if .Values.worker.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.worker.persistence.existingClaim }} + {{- else }} + {{- if .Values.worker.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.worker.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.worker.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: "data" + {{- if or .Values.worker.persistence.annotations .Values.commonAnnotations }} + {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.persistence.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.worker.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.worker.persistence.size | quote }} + {{- if .Values.worker.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.worker.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.worker.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.worker.persistence.existingVolume }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.worker.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/vpa.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/vpa.yaml new file mode 100644 index 00000000..82de8ba4 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/templates/worker/vpa.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (or (eq .Values.executor "CeleryExecutor") (eq .Values.executor "CeleryKubernetesExecutor")) (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.worker.autoscaling.vpa.enabled }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "airflow.worker.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: worker + {{- if or .Values.worker.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: airflow-worker + {{- with .Values.worker.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.worker.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.worker.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ template "airflow.worker.fullname" . }} + {{- if .Values.worker.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.worker.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/airflow/3/chart/airflow-3/values.yaml b/addons/airflow/3/chart/airflow-3/values.yaml new file mode 100644 index 00000000..f0e7d960 --- /dev/null +++ b/addons/airflow/3/chart/airflow-3/values.yaml @@ -0,0 +1,3346 @@ +# Copyright Drycc Community. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto + ## @param global.compatibility.omitEmptySeLinuxOptions If set to true, removes the seLinuxOptions from the securityContexts when it is set to an empty object + ## + omitEmptySeLinuxOptions: false + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param apiVersions Override Kubernetes API versions reported by .Capabilities +## +apiVersions: [] +## @param nameOverride String to partially override common.names.name +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param usePasswordFiles Mount credentials as files instead of 
using environment variables +## +usePasswordFiles: false +## Diagnostic mode +## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) +## @param diagnosticMode.command Command to override all containers in the chart release +## @param diagnosticMode.args Args to override all containers in the chart release +## +diagnosticMode: + enabled: false + command: + - sleep + args: + - infinity + +## @section Airflow common parameters + +## Bitnami Airflow image version +## ref: https://hub.docker.com/r/bitnami/airflow/tags +## @param image.registry [default: REGISTRY_NAME] Airflow image registry +## @param image.repository [default: REPOSITORY_NAME/airflow] Airflow image repository +## @skip image.tag Airflow image tag (immutable tags are recommended) +## @param image.digest Airflow image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Airflow image pull policy +## @param image.pullSecrets Airflow image pull secrets +## @param image.debug Enable image debug mode +image: + registry: registry.drycc.cc + repository: drycc-addons/airflow + tag: "3" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: true + +## Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/airflow#environment-variables +## +auth: + ## @param auth.username Username to access web UI + ## + username: drycc + ## @param auth.password Password to access web UI + ## + password: "" + ## @param auth.fernetKey Fernet key to secure connections + ## ref: https://airflow.readthedocs.io/en/stable/howto/secure-connections.html + ## ref: https://bcb.github.io/airflow/fernet-key + ## + fernetKey: "" + ## @param auth.secretKey Secret key to run your flask app + ## ref: https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#secret-key + ## + secretKey: "" + ## @param auth.jwtSecretKey JWT secret key to run your flask app + ## ref: https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#secret-key + ## + jwtSecretKey: "" + ## @param auth.existingSecret Name of an existing secret to use for Airflow credentials + ## `auth.password`, `auth.fernetKey`, and `auth.secretKey` will be ignored and picked up from this secret + ## The secret must contain the keys `airflow-password`, `airflow-fernet-key` and `airflow-secret-key' + ## The value is evaluated as a template + ## + existingSecret: "" +## @param executor Airflow executor. 
Allowed values: `LocalExecutor`, `CeleryExecutor`, `KubernetesExecutor`, `SequentialExecutor` (Airflow 2.x only), `CeleryKubernetesExecutor` (Airflow 2.x only), and `LocalKubernetesExecutor` (Airflow 2.x only) +## ref: http://airflow.apache.org/docs/stable/executor/index.html +## +executor: CeleryExecutor +## @param loadExamples Switch to load some Airflow examples +## +loadExamples: false +## @param configuration Specify content for Airflow config file (auto-generated based on other parameters otherwise) +## ref: https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html +## Use YAML format, then it's transformed to .cfg format by the chart. e.g: +## configuration: +## core: +## dags_folder: "/opt/drycc/airflow/dags" +## ... will be transformed to: +## [core] +## dags_folder = "/opt/drycc/airflow/dags" +## +configuration: {} +## @param overrideConfiguration Airflow common configuration override. Values defined here takes precedence over the ones defined at `configuration` +## +overrideConfiguration: {} +## @param localSettings Specify content for Airflow local settings (airflow_local_settings.py) +## ref: https://airflow.apache.org/docs/apache-airflow/stable/howto/set-config.html#configuring-local-settings +## e.g: +## localSettings: |- +## ... 
+## +localSettings: "" +## @param existingConfigmap Name of an existing ConfigMap with the Airflow config file and, optionally, the local settings file +## +existingConfigmap: "" +## Load custom DAGs files from a ConfigMap or Git repositories +## @param dags.enabled Enable loading DAGs from a ConfigMap or Git repositories +## @param dags.existingConfigmap Name of an existing ConfigMap with all the DAGs files you want to load in Airflow +## @param dags.repositories [array] Array of repositories from which to download DAG files +## +dags: + enabled: false + existingConfigmap: "" + ## E.g: + ## repositories: + ## - repository: https://github.com/myuser/myrepo + ## branch: main + ## name: my-dags + ## path: / + ## + repositories: [] + + ## @param dags.sshKey SSH Private key used to clone/sync DAGs from Git repositories (ignored if dags.existingSshKeySecret is set) + ## + sshKey: "" + ## @param dags.existingSshKeySecret Name of a secret containing the SSH private key used to clone/sync DAGs from Git repositories + ## + existingSshKeySecret: "" + ## @param dags.existingSshKeySecretKey Key in the existing secret containing the SSH private key + ## + existingSshKeySecretKey: "" +## Load custom plugins from Git repositories +## @param plugins.enabled Enable loading plugins from Git repositories +## @param plugins.repositories [array] Array of repositories from which to download plugins +## +plugins: + enabled: false + ## E.g: + ## repositories: + ## - repository: https://github.com/myuser/myrepo + ## branch: main + ## name: my-plugins + ## path: / + ## + repositories: [] + ## @param plugins.sshKey SSH Private key used to clone/sync plugins from Git repositories (ignored if plugins.existingSshKeySecret is set) + ## + sshKey: "" + ## @param plugins.existingSshKeySecret Name of a secret containing the SSH private key used to clone/sync plugins from Git repositories + ## + existingSshKeySecret: "" + ## @param plugins.existingSshKeySecretKey Key in the existing secret containing 
the SSH private key + ## + existingSshKeySecretKey: "" + +## @param requirements Specify content for Airflow worker requirements.txt file +## e.g: +## requirements: |- +## pylint==2.13.9 +## ... +## +requirements: "" + +## @param pip.conf Specify content for Airflow worker pip.conf file +## e.g: +## pip: |- +## [global] +## ... +## +pip: "" + +## Default init Containers +## +defaultInitContainers: + ## Airflow "prepare-config" init container + ## Used to prepare the Airflow configuration files for main containers to use them + ## + prepareConfig: + ## Configure "prepare-config" init-container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.enabled Enabled "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "prepare-config" init-containers + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsUser Set runAsUser in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsGroup Set runAsUser in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.privileged Set privileged in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "prepare-config" init-containers' 
Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.add List of capabilities to be added in "prepare-config" init-containers + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "prepare-config" init-containers + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.seccompProfile.type Set seccomp profile in "prepare-config" init-containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Airflow "prepare-config" init container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.prepareConfig.resourcesPreset Set Airflow "prepare-config" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.prepareConfig.resources is set (defaultInitContainers.prepareConfig.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defaultInitContainers.prepareConfig.resources Set Airflow "prepare-config" init container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Airflow "wait-for-db-migrations" init container + ## Used to wait for db migrations to be ready + ## + waitForDBMigrations: + ## Configure "wait-for-db-migrations" init-container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.enabled Enabled "wait-for-db-migrations" init-containers' Security Context + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "wait-for-db-migrations" init-containers + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.runAsUser Set runAsUser in "wait-for-db-migrations" init-containers' Security Context + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.runAsGroup Set runAsUser in "wait-for-db-migrations" init-containers' Security Context + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "wait-for-db-migrations" init-containers' Security Context + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "wait-for-db-migrations" init-containers' Security Context + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.privileged Set privileged in "wait-for-db-migrations" init-containers' Security Context 
+ ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "wait-for-db-migrations" init-containers' Security Context + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.capabilities.add List of capabilities to be added in "wait-for-db-migrations" init-containers + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "wait-for-db-migrations" init-containers + ## @param defaultInitContainers.waitForDBMigrations.containerSecurityContext.seccompProfile.type Set seccomp profile in "wait-for-db-migrations" init-containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Airflow "wait-for-db-migrations" init container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.waitForDBMigrations.resourcesPreset Set Airflow "wait-for-db-migrations" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.waitForDBMigrations.resources is set (defaultInitContainers.waitForDBMigrations.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "micro" + ## @param defaultInitContainers.waitForDBMigrations.resources Set Airflow "wait-for-db-migrations" init container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Airflow "load-dags-plugins" init container + ## Used to load DAGs and/or plugins from a ConfigMap or Git repositories + ## + loadDAGsPlugins: + ## @param defaultInitContainers.loadDAGsPlugins.command Override cmd + ## @param defaultInitContainers.loadDAGsPlugins.args Override args + ## @param defaultInitContainers.loadDAGsPlugins.extraVolumeMounts Add extra volume mounts + ## @param defaultInitContainers.loadDAGsPlugins.extraEnvVars Add extra environment variables + ## @param defaultInitContainers.loadDAGsPlugins.extraEnvVarsCM ConfigMap with extra environment variables + ## @param defaultInitContainers.loadDAGsPlugins.extraEnvVarsSecret Secret with extra environment variables + command: [] + args: [] + extraVolumeMounts: [] + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + ## Configure "load-dags-plugins" init-container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.enabled Enabled "load-dags-plugins" init-containers' Security Context + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "load-dags-plugins" init-containers + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.runAsUser Set runAsUser in "load-dags-plugins" init-containers' Security Context + ## @param 
defaultInitContainers.loadDAGsPlugins.containerSecurityContext.runAsGroup Set runAsUser in "load-dags-plugins" init-containers' Security Context + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "load-dags-plugins" init-containers' Security Context + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "load-dags-plugins" init-containers' Security Context + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.privileged Set privileged in "load-dags-plugins" init-containers' Security Context + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "load-dags-plugins" init-containers' Security Context + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.capabilities.add List of capabilities to be added in "load-dags-plugins" init-containers + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "load-dags-plugins" init-containers + ## @param defaultInitContainers.loadDAGsPlugins.containerSecurityContext.seccompProfile.type Set seccomp profile in "load-dags-plugins" init-containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Airflow "load-dags-plugins" init container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.loadDAGsPlugins.resourcesPreset Set Airflow "load-dags-plugins" init container resources according to one common preset (allowed values: none, nano, small, medium, large, 
xlarge, 2xlarge). This is ignored if defaultInitContainers.loadDAGsPlugins.resources is set (defaultInitContainers.loadDAGsPlugins.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defaultInitContainers.loadDAGsPlugins.resources Set Airflow "load-dags-plugins" init container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} +## Default sidecars +## +defaultSidecars: + ## Airflow "sync-dags-plugins" sidecar + ## Used to sync DAGs and/or plugins from Git repositories + ## + syncDAGsPlugins: + ## @param defaultSidecars.syncDAGsPlugins.interval Interval in seconds to pull the git repository containing the DAGs and/or plugins + ## @param defaultSidecars.syncDAGsPlugins.command Override cmd + ## @param defaultSidecars.syncDAGsPlugins.args Override args + ## @param defaultSidecars.syncDAGsPlugins.extraVolumeMounts Add extra volume mounts + ## @param defaultSidecars.syncDAGsPlugins.extraEnvVars Add extra environment variables + ## @param defaultSidecars.syncDAGsPlugins.extraEnvVarsCM ConfigMap with extra environment variables + ## @param defaultSidecars.syncDAGsPlugins.extraEnvVarsSecret Secret with extra environment variables + interval: 60 + command: [] + args: [] + extraVolumeMounts: [] + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + ## Configure "sync-dags-plugins" sidecar Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.enabled Enabled "sync-dags-plugins" sidecars' Security Context + ## @param 
defaultSidecars.syncDAGsPlugins.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "sync-dags-plugins" sidecars + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.runAsUser Set runAsUser in "sync-dags-plugins" sidecars' Security Context + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.runAsGroup Set runAsUser in "sync-dags-plugins" sidecars' Security Context + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "sync-dags-plugins" sidecars' Security Context + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "sync-dags-plugins" sidecars' Security Context + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.privileged Set privileged in "sync-dags-plugins" sidecars' Security Context + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "sync-dags-plugins" sidecars' Security Context + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.capabilities.add List of capabilities to be added in "sync-dags-plugins" sidecars + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "sync-dags-plugins" sidecars + ## @param defaultSidecars.syncDAGsPlugins.containerSecurityContext.seccompProfile.type Set seccomp profile in "sync-dags-plugins" sidecars + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Airflow "sync-dags-plugins" sidecar resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param 
defaultSidecars.syncDAGsPlugins.resourcesPreset Set Airflow "sync-dags-plugins" sidecar resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultSidecars.syncDAGsPlugins.resources is set (defaultSidecars.syncDAGsPlugins.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defaultSidecars.syncDAGsPlugins.resources Set Airflow "sync-dags-plugins" sidecar requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} +## @param extraEnvVars Add extra environment variables for all the Airflow pods +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables for all the Airflow pods +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables for all the Airflow pods +## +extraEnvVarsSecret: "" +## @param extraEnvVarsSecrets List of secrets with extra environment variables for all the Airflow pods +## +extraEnvVarsSecrets: [] +## @param sidecars Add additional sidecar containers to all the Airflow pods +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to all the Airflow pods +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for all the Airflow pods +## +extraVolumeMounts: [] +## @param extraVolumes Optionally 
specify extra list of additional volumes for the all the Airflow pods +## +extraVolumes: [] + +## @section Airflow webserver parameters +## +web: + ## @param web.baseUrl URL used to access to Airflow webserver + ## + baseUrl: "" + ## @param web.configuration Specify content for webserver_config.py (auto-generated based on other env. vars otherwise) + ## + configuration: "" + ## @param web.extraConfiguration Specify extra content to be appended to default webserver_config.py (ignored if `web.configuration` or `web.existingConfigmap` are set) + ## + extraConfiguration: "" + ## @param web.existingConfigmap Name of an existing config map containing the Airflow webserver config file + ## + existingConfigmap: "" + ## @param web.tls.enabled Enable TLS configuration for Airflow webserver + ## @param web.tls.autoGenerated.enabled Enable automatic generation of TLS certificates + ## @param web.tls.autoGenerated.engine Mechanism to generate the certificates (allowed values: helm, cert-manager) + ## @param web.tls.autoGenerated.certManager.existingIssuer The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine) + ## @param web.tls.autoGenerated.certManager.existingIssuerKind Existing Issuer kind, defaults to Issuer (only for `cert-manager` engine) + ## @param web.tls.autoGenerated.certManager.keyAlgorithm Key algorithm for the certificates (only for `cert-manager` engine) + ## @param web.tls.autoGenerated.certManager.keySize Key size for the certificates (only for `cert-manager` engine) + ## @param web.tls.autoGenerated.certManager.duration Duration for the certificates (only for `cert-manager` engine) + ## @param web.tls.autoGenerated.certManager.renewBefore Renewal period for the certificates (only for `cert-manager` engine) + ## @param web.tls.ca CA certificate for TLS. Ignored if `tls.existingSecret` is set + ## @param web.tls.cert TLS certificate for Airflow webserver. 
Ignored if `web.tls.existingSecret` is set + ## @param web.tls.key TLS key for Airflow webserver. Ignored if `web.tls.existingSecret` is set + ## @param web.tls.existingSecret The name of an existing Secret containing the Airflow webserver certificates for TLS + ## + tls: + enabled: false + autoGenerated: + enabled: true + engine: helm + certManager: + existingIssuer: "" + existingIssuerKind: "" + keySize: 2048 + keyAlgorithm: RSA + duration: 2160h + renewBefore: 360h + ca: "" + cert: "" + key: "" + existingSecret: "" + ## @param web.command Override default container command (useful when using custom images) + ## + command: [] + ## @param web.args Override default container args (useful when using custom images) + ## + args: [] + ## @param web.extraEnvVars Array with extra environment variables to add Airflow webserver pods + ## + extraEnvVars: [] + ## @param web.extraEnvVarsCM ConfigMap containing extra environment variables for Airflow webserver pods + ## + extraEnvVarsCM: "" + ## @param web.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Airflow webserver pods + ## + extraEnvVarsSecret: "" + ## @param web.extraEnvVarsSecrets List of secrets with extra environment variables for Airflow webserver pods + ## + extraEnvVarsSecrets: [] + ## @param web.containerPorts.http Airflow webserver HTTP container port + ## + containerPorts: + http: 8080 + ## @param web.replicaCount Number of Airflow webserver replicas + ## + replicaCount: 1 + ## Configure extra options for Airflow webserver containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param web.livenessProbe.enabled Enable livenessProbe on Airflow webserver containers + ## @param web.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param web.livenessProbe.periodSeconds Period seconds for livenessProbe + 
## @param web.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param web.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param web.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 20 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param web.readinessProbe.enabled Enable readinessProbe on Airflow webserver containers + ## @param web.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param web.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param web.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param web.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param web.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param web.startupProbe.enabled Enable startupProbe on Airflow webserver containers + ## @param web.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param web.startupProbe.periodSeconds Period seconds for startupProbe + ## @param web.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param web.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param web.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param web.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param web.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param web.customStartupProbe Custom startupProbe that 
overrides the default one + ## + customStartupProbe: {} + ## Airflow webserver resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param web.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if web.resources is set (web.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "medium" + ## @param web.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Configure Airflow webserver pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param web.podSecurityContext.enabled Enabled Airflow webserver pods' Security Context + ## @param web.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param web.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param web.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param web.podSecurityContext.fsGroup Set Airflow webserver pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Airflow webserver containers (only main one) Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param web.containerSecurityContext.enabled Enabled Airflow webserver containers' Security Context + ## @param web.containerSecurityContext.seLinuxOptions 
[object,nullable] Set SELinux options in container + ## @param web.containerSecurityContext.runAsUser Set Airflow webserver containers' Security Context runAsUser + ## @param web.containerSecurityContext.runAsGroup Set Airflow webserver containers' Security Context runAsGroup + ## @param web.containerSecurityContext.runAsNonRoot Set Airflow webserver containers' Security Context runAsNonRoot + ## @param web.containerSecurityContext.privileged Set web container's Security Context privileged + ## @param web.containerSecurityContext.allowPrivilegeEscalation Set web container's Security Context allowPrivilegeEscalation + ## @param web.containerSecurityContext.readOnlyRootFilesystem Set web container's Security Context readOnlyRootFilesystem + ## @param web.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param web.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param web.lifecycleHooks for the Airflow webserver container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param web.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param web.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param web.podLabels Add extra labels to the Airflow webserver pods + ## + podLabels: {} + ## @param web.podAnnotations Add extra annotations to the Airflow webserver pods + ## + podAnnotations: {} + ## @param web.topologyKey Override common lib default topology key. 
If empty - "kubernetes.io/hostname" is used + ## i.e. topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param web.affinity Affinity for Airflow webserver pods assignment (evaluated as a template) + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: `web.podAffinityPreset`, `web.podAntiAffinityPreset`, and `web.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param web.nodeAffinityPreset.key Node label key to match. Ignored if `web.affinity` is set. + ## @param web.nodeAffinityPreset.type Node affinity preset type. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard` + ## @param web.nodeAffinityPreset.values Node label values to match. Ignored if `web.affinity` is set. + ## + nodeAffinityPreset: + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + type: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param web.nodeSelector Node labels for Airflow webserver pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param web.podAffinityPreset Pod affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param web.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. 
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## @param web.tolerations Tolerations for Airflow webserver pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param web.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param web.priorityClassName Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param web.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param web.terminationGracePeriodSeconds Seconds Airflow webserver pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param web.updateStrategy.type Airflow webserver deployment strategy type + ## @param web.updateStrategy.rollingUpdate Airflow webserver deployment rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param web.sidecars Add additional sidecar containers to the Airflow webserver pods + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param web.initContainers Add additional init containers to the Airflow webserver pods + ## Example: + 
## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param web.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Airflow webserver pods + ## + extraVolumeMounts: [] + ## @param web.extraVolumes Optionally specify extra list of additional volumes for the Airflow webserver pods + ## + extraVolumes: [] + ## Airflow webserver Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## @param web.pdb.create Deploy a pdb object for the Airflow webserver pods + ## @param web.pdb.minAvailable Minimum number/percentage of available Airflow webserver replicas + ## @param web.pdb.maxUnavailable Maximum number/percentage of unavailable Airflow webserver replicas + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param web.autoscaling.vpa.enabled Enable VPA for Airflow webserver + ## @param web.autoscaling.vpa.annotations Annotations for VPA resource + ## @param web.autoscaling.vpa.controlledResources List of resources that the VPA can control. Defaults to cpu and memory + ## @param web.autoscaling.vpa.maxAllowed VPA max allowed resources for the pod + ## @param web.autoscaling.vpa.minAllowed VPA min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param web.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updatePolicy: + updateMode: Auto + ## @param web.autoscaling.hpa.enabled Enable HPA for Airflow webserver + ## @param web.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param web.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param web.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param web.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Web Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param web.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param web.networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports Web is listening + ## on. When true, Web will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param web.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param web.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param web.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param web.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param web.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Airflow scheduler parameters +## +scheduler: + ## @param scheduler.replicaCount Number of scheduler replicas + ## + replicaCount: 1 + ## @param scheduler.command Override cmd + ## + command: [] + ## @param scheduler.args Override args + ## + args: [] + ## @param scheduler.extraEnvVars Add extra environment variables + ## + extraEnvVars: [] + ## @param scheduler.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param scheduler.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param scheduler.extraEnvVarsSecrets List of secrets with extra environment variables for Airflow scheduler pods + ## + extraEnvVarsSecrets: [] + ## Configure extra options for Airflow scheduler containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param scheduler.livenessProbe.enabled Enable livenessProbe on Airflow scheduler containers + ## @param scheduler.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param scheduler.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param scheduler.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param scheduler.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param scheduler.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 20 + timeoutSeconds: 15 + failureThreshold: 6 + successThreshold: 1 + ## @param scheduler.readinessProbe.enabled Enable readinessProbe on Airflow scheduler containers + ## @param scheduler.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param scheduler.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param scheduler.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param scheduler.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param scheduler.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 15 + failureThreshold: 6 + successThreshold: 1 + ## @param scheduler.startupProbe.enabled Enable startupProbe on Airflow scheduler containers + ## @param scheduler.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param scheduler.startupProbe.periodSeconds Period seconds for startupProbe + ## @param scheduler.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param scheduler.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param 
scheduler.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param scheduler.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param scheduler.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param scheduler.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Airflow scheduler resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if scheduler.resources is set (scheduler.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param scheduler.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Configure Airflow scheduler pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param scheduler.podSecurityContext.enabled Enabled Airflow scheduler pods' Security Context + ## @param scheduler.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param scheduler.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param scheduler.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param scheduler.podSecurityContext.fsGroup Set Airflow scheduler pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Airflow scheduler containers (only main one) Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param scheduler.containerSecurityContext.enabled Enabled Airflow scheduler containers' Security Context + ## @param scheduler.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param scheduler.containerSecurityContext.runAsUser Set Airflow scheduler containers' Security Context runAsUser + ## @param scheduler.containerSecurityContext.runAsGroup Set Airflow scheduler containers' Security Context runAsGroup + ## @param scheduler.containerSecurityContext.runAsNonRoot Set Airflow scheduler containers' Security Context 
runAsNonRoot + ## @param scheduler.containerSecurityContext.privileged Set scheduler container's Security Context privileged + ## @param scheduler.containerSecurityContext.allowPrivilegeEscalation Set scheduler container's Security Context allowPrivilegeEscalation + ## @param scheduler.containerSecurityContext.readOnlyRootFilesystem Set scheduler container's Security Context readOnlyRootFilesystem + ## @param scheduler.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param scheduler.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param scheduler.lifecycleHooks for the Airflow scheduler container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param scheduler.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param scheduler.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param scheduler.podLabels Add extra labels to the Airflow scheduler pods + ## + podLabels: {} + ## @param scheduler.podAnnotations Add extra annotations to the Airflow scheduler pods + ## + podAnnotations: {} + ## @param scheduler.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. 
topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param scheduler.affinity Affinity for Airflow scheduler pods assignment (evaluated as a template) + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: `scheduler.podAffinityPreset`, `scheduler.podAntiAffinityPreset`, and `scheduler.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param scheduler.nodeAffinityPreset.key Node label key to match. Ignored if `scheduler.affinity` is set. + ## @param scheduler.nodeAffinityPreset.type Node affinity preset type. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard` + ## @param scheduler.nodeAffinityPreset.values Node label values to match. Ignored if `scheduler.affinity` is set. + ## + nodeAffinityPreset: + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + type: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param scheduler.nodeSelector Node labels for Airflow scheduler pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param scheduler.podAffinityPreset Pod affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param scheduler.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. 
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## @param scheduler.tolerations Tolerations for Airflow scheduler pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param scheduler.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param scheduler.priorityClassName Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param scheduler.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param scheduler.terminationGracePeriodSeconds Seconds Airflow scheduler pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param scheduler.updateStrategy.type Airflow scheduler deployment strategy type + ## @param scheduler.updateStrategy.rollingUpdate Airflow scheduler deployment rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param scheduler.sidecars Add additional sidecar containers to the Airflow scheduler pods + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param scheduler.initContainers Add additional init 
containers to the Airflow scheduler pods + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param scheduler.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Airflow scheduler pods + ## + extraVolumeMounts: [] + ## @param scheduler.extraVolumes Optionally specify extra list of additional volumes for the Airflow scheduler pods + ## + extraVolumes: [] + ## Airflow scheduler Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## @param scheduler.pdb.create Deploy a pdb object for the Airflow scheduler pods + ## @param scheduler.pdb.minAvailable Minimum number/percentage of available Airflow scheduler replicas + ## @param scheduler.pdb.maxUnavailable Maximum number/percentage of unavailable Airflow scheduler replicas + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param scheduler.autoscaling.vpa.enabled Enable VPA for Airflow scheduler + ## @param scheduler.autoscaling.vpa.annotations Annotations for VPA resource + ## @param scheduler.autoscaling.vpa.controlledResources List of resources that the VPA can control. 
Defaults to cpu and memory + ## @param scheduler.autoscaling.vpa.maxAllowed VPA max allowed resources for the pod + ## @param scheduler.autoscaling.vpa.minAllowed VPA min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param scheduler.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param scheduler.autoscaling.hpa.enabled Enable HPA for Airflow scheduler + ## @param scheduler.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param scheduler.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param scheduler.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param scheduler.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Scheduler Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param scheduler.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param scheduler.networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports Scheduler is listening + ## on. When true, Scheduler will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param scheduler.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param scheduler.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param scheduler.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param scheduler.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param scheduler.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Airflow Dag Processor parameters +## ref: https://airflow.apache.org/docs/apache-airflow/stable/authoring-and-scheduling/dagfile-processing.html +## +dagProcessor: + ## @param dagProcessor.enabled Run Airflow Dag Processor Manager as a standalone component + ## + enabled: true + ## @param dagProcessor.replicaCount Number of Airflow Dag Processor replicas + ## + replicaCount: 1 + ## @param dagProcessor.command Override default Airflow Dag Processor cmd + ## + command: [] + ## @param dagProcessor.args Override default Airflow Dag Processor args + ## + args: [] + ## @param dagProcessor.extraEnvVars Add extra environment variables to Airflow Dag Processor containers + ## + extraEnvVars: [] + ## @param dagProcessor.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param dagProcessor.extraEnvVarsSecret Secret with extra environment variables + ## + 
extraEnvVarsSecret: "" + ## Configure extra options for Airflow Dag Processor containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param dagProcessor.livenessProbe.enabled Enable livenessProbe on Airflow Dag Processor containers + ## @param dagProcessor.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param dagProcessor.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param dagProcessor.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param dagProcessor.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param dagProcessor.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 20 + timeoutSeconds: 15 + failureThreshold: 6 + successThreshold: 1 + ## @param dagProcessor.readinessProbe.enabled Enable readinessProbe on Airflow Dag Processor containers + ## @param dagProcessor.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param dagProcessor.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param dagProcessor.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param dagProcessor.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param dagProcessor.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 15 + failureThreshold: 6 + successThreshold: 1 + ## @param dagProcessor.startupProbe.enabled Enable startupProbe on Airflow Dag Processor containers + ## @param dagProcessor.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param dagProcessor.startupProbe.periodSeconds Period seconds for startupProbe + ## @param 
dagProcessor.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param dagProcessor.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param dagProcessor.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param dagProcessor.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param dagProcessor.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param dagProcessor.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Airflow Dag Processor resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param dagProcessor.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if dagProcessor.resources is set (dagProcessor.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param dagProcessor.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Configure Airflow Dag Processor pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param dagProcessor.podSecurityContext.enabled Enabled Airflow Dag Processor pods' Security Context + ## @param dagProcessor.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param dagProcessor.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param dagProcessor.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param dagProcessor.podSecurityContext.fsGroup Set Airflow Dag Processor pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Airflow Dag Processor containers (only main one) Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param dagProcessor.containerSecurityContext.enabled Enabled Airflow Dag Processor containers' Security Context + ## @param dagProcessor.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param dagProcessor.containerSecurityContext.runAsUser Set Airflow Dag Processor containers' Security Context runAsUser + ## @param dagProcessor.containerSecurityContext.runAsGroup Set Airflow Dag Processor containers' Security Context runAsGroup + ## @param dagProcessor.containerSecurityContext.runAsNonRoot 
Set Airflow Dag Processor containers' Security Context runAsNonRoot + ## @param dagProcessor.containerSecurityContext.privileged Set Airflow Dag Processor container's Security Context privileged + ## @param dagProcessor.containerSecurityContext.allowPrivilegeEscalation Set Airflow Dag Processor container's Security Context allowPrivilegeEscalation + ## @param dagProcessor.containerSecurityContext.readOnlyRootFilesystem Set Airflow Dag Processor container's Security Context readOnlyRootFilesystem + ## @param dagProcessor.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param dagProcessor.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param dagProcessor.lifecycleHooks for the Airflow Dag Processor containers to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param dagProcessor.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param dagProcessor.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param dagProcessor.podLabels Add extra labels to the Airflow Dag Processor pods + ## + podLabels: {} + ## @param dagProcessor.podAnnotations Add extra annotations to the Airflow Dag Processor pods + ## + podAnnotations: {} + ## @param dagProcessor.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. 
topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param dagProcessor.affinity Affinity for Airflow Dag Processor pods assignment (evaluated as a template) + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: `dagProcessor.podAffinityPreset`, `dagProcessor.podAntiAffinityPreset`, and `dagProcessor.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param dagProcessor.nodeAffinityPreset.key Node label key to match. Ignored if `dagProcessor.affinity` is set. + ## @param dagProcessor.nodeAffinityPreset.type Node affinity preset type. Ignored if `dagProcessor.affinity` is set. Allowed values: `soft` or `hard` + ## @param dagProcessor.nodeAffinityPreset.values Node label values to match. Ignored if `dagProcessor.affinity` is set. + ## + nodeAffinityPreset: + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + type: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param dagProcessor.nodeSelector Node labels for Airflow Dag Processor pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param dagProcessor.podAffinityPreset Pod affinity preset. Ignored if `dagProcessor.affinity` is set. Allowed values: `soft` or `hard`. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param dagProcessor.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `dagProcessor.affinity` is set. Allowed values: `soft` or `hard`. 
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## @param dagProcessor.tolerations Tolerations for Airflow Dag Processor pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param dagProcessor.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param dagProcessor.priorityClassName Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param dagProcessor.schedulerName Use an alternate K8s scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param dagProcessor.terminationGracePeriodSeconds Seconds Airflow Dag Processor pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param dagProcessor.updateStrategy.type Airflow Dag Processor deployment strategy type + ## @param dagProcessor.updateStrategy.rollingUpdate Airflow Dag Processor deployment rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param dagProcessor.sidecars Add additional sidecar containers to the Airflow Dag Processor pods + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param 
dagProcessor.initContainers Add additional init containers to the Airflow Dag Processor pods + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param dagProcessor.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Airflow Dag Processor containers + ## + extraVolumeMounts: [] + ## @param dagProcessor.extraVolumes Optionally specify extra list of additional volumes for the Airflow Dag Processor pods + ## + extraVolumes: [] + ## Airflow Dag Processor Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## @param dagProcessor.pdb.create Deploy a pdb object for the Airflow Dag Processor pods + ## @param dagProcessor.pdb.minAvailable Minimum number/percentage of available Airflow Dag Processor replicas + ## @param dagProcessor.pdb.maxUnavailable Maximum number/percentage of unavailable Airflow Dag Processor replicas + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param dagProcessor.autoscaling.vpa.enabled Enable VPA for Airflow Dag Processor + ## @param dagProcessor.autoscaling.vpa.annotations Annotations for VPA resource + ## @param dagProcessor.autoscaling.vpa.controlledResources List of resources that the VPA can control. 
Defaults to cpu and memory + ## @param dagProcessor.autoscaling.vpa.maxAllowed VPA max allowed resources for the pod + ## @param dagProcessor.autoscaling.vpa.minAllowed VPA min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param dagProcessor.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param dagProcessor.autoscaling.hpa.enabled Enable HPA for Airflow Dag Processor + ## @param dagProcessor.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param dagProcessor.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param dagProcessor.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param dagProcessor.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Airflow Dag Processor Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param dagProcessor.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param dagProcessor.networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports Airflow Dag Processor is listening + ## on. When true, Airflow Dag Processor will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param dagProcessor.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param dagProcessor.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param dagProcessor.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param dagProcessor.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param dagProcessor.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Airflow Triggerer parameters +## ref: https://airflow.apache.org/docs/apache-airflow/stable/authoring-and-scheduling/deferring.html#deferrable-operators-triggers +## +triggerer: + ## @param triggerer.enabled Run Airflow Triggerer as a standalone component + ## + enabled: true + ## @param triggerer.defaultCapacity How many triggers a single Triggerer can run at once + ## + defaultCapacity: 1000 + ## @param triggerer.replicaCount Number of Airflow Triggerer replicas + ## + replicaCount: 1 + ## @param triggerer.command Override default Airflow Triggerer cmd + ## + command: [] + ## @param triggerer.args Override default Airflow Triggerer args + ## + args: [] + ## @param triggerer.extraEnvVars Add extra environment variables to Airflow Triggerer containers + ## + extraEnvVars: [] + ## @param triggerer.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## 
@param triggerer.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param triggerer.containerPorts.logs Airflow Triggerer logs container port + ## + containerPorts: + logs: 8794 + ## Configure extra options for Airflow Triggerer containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param triggerer.livenessProbe.enabled Enable livenessProbe on Airflow Triggerer containers + ## @param triggerer.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param triggerer.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param triggerer.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param triggerer.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param triggerer.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 20 + timeoutSeconds: 15 + failureThreshold: 6 + successThreshold: 1 + ## @param triggerer.readinessProbe.enabled Enable readinessProbe on Airflow Triggerer containers + ## @param triggerer.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param triggerer.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param triggerer.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param triggerer.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param triggerer.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 15 + failureThreshold: 6 + successThreshold: 1 + ## @param triggerer.startupProbe.enabled Enable startupProbe on Airflow Triggerer containers + ## @param triggerer.startupProbe.initialDelaySeconds 
Initial delay seconds for startupProbe + ## @param triggerer.startupProbe.periodSeconds Period seconds for startupProbe + ## @param triggerer.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param triggerer.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param triggerer.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param triggerer.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param triggerer.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param triggerer.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Airflow Triggerer resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param triggerer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if triggerer.resources is set (triggerer.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param triggerer.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Configure Airflow Triggerer pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param triggerer.podSecurityContext.enabled Enabled Airflow Triggerer pods' Security Context + ## @param triggerer.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param triggerer.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param triggerer.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param triggerer.podSecurityContext.fsGroup Set Airflow Triggerer pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Airflow Triggerer containers (only main one) Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param triggerer.containerSecurityContext.enabled Enabled Airflow Triggerer containers' Security Context + ## @param triggerer.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param triggerer.containerSecurityContext.runAsUser Set Airflow Triggerer containers' Security Context runAsUser + ## @param triggerer.containerSecurityContext.runAsGroup Set Airflow Triggerer containers' Security Context runAsGroup + ## @param triggerer.containerSecurityContext.runAsNonRoot Set Airflow Triggerer containers' Security Context 
runAsNonRoot + ## @param triggerer.containerSecurityContext.privileged Set Airflow Triggerer container's Security Context privileged + ## @param triggerer.containerSecurityContext.allowPrivilegeEscalation Set Airflow Triggerer container's Security Context allowPrivilegeEscalation + ## @param triggerer.containerSecurityContext.readOnlyRootFilesystem Set Airflow Triggerer container's Security Context readOnlyRootFilesystem + ## @param triggerer.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param triggerer.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param triggerer.lifecycleHooks for the Airflow Triggerer containers to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param triggerer.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param triggerer.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param triggerer.podLabels Add extra labels to the Airflow Triggerer pods + ## + podLabels: {} + ## @param triggerer.podAnnotations Add extra annotations to the Airflow Triggerer pods + ## + podAnnotations: {} + ## @param triggerer.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. 
topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param triggerer.affinity Affinity for Airflow Triggerer pods assignment (evaluated as a template) + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: `triggerer.podAffinityPreset`, `triggerer.podAntiAffinityPreset`, and `triggerer.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param triggerer.nodeAffinityPreset.key Node label key to match. Ignored if `triggerer.affinity` is set. + ## @param triggerer.nodeAffinityPreset.type Node affinity preset type. Ignored if `triggerer.affinity` is set. Allowed values: `soft` or `hard` + ## @param triggerer.nodeAffinityPreset.values Node label values to match. Ignored if `triggerer.affinity` is set. + ## + nodeAffinityPreset: + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + type: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param triggerer.nodeSelector Node labels for Airflow Triggerer pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param triggerer.podAffinityPreset Pod affinity preset. Ignored if `triggerer.affinity` is set. Allowed values: `soft` or `hard`. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param triggerer.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `triggerer.affinity` is set. Allowed values: `soft` or `hard`. 
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## @param triggerer.tolerations Tolerations for Airflow Triggerer pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param triggerer.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param triggerer.priorityClassName Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param triggerer.schedulerName Use an alternate K8s scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param triggerer.terminationGracePeriodSeconds Seconds Airflow Triggerer pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param triggerer.podManagementPolicy Pod management policy for the Airflow Triggerer statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param triggerer.updateStrategy.type Airflow Triggerer statefulset strategy type + ## @param triggerer.updateStrategy.rollingUpdate Airflow Triggerer statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param triggerer.sidecars Add additional sidecar containers to the Airflow 
Triggerer pods + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param triggerer.initContainers Add additional init containers to the Airflow Triggerer pods + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param triggerer.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Airflow Triggerer containers + ## + extraVolumeMounts: [] + ## @param triggerer.extraVolumes Optionally specify extra list of additional volumes for the Airflow Triggerer pods + ## + extraVolumes: [] + ## Airflow Triggerer Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## @param triggerer.pdb.create Deploy a pdb object for the Airflow Triggerer pods + ## @param triggerer.pdb.minAvailable Minimum number/percentage of available Airflow Triggerer replicas + ## @param triggerer.pdb.maxUnavailable Maximum number/percentage of unavailable Airflow Triggerer replicas + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param triggerer.autoscaling.vpa.enabled Enable VPA for Airflow Triggerer + ## @param triggerer.autoscaling.vpa.annotations Annotations for VPA resource + ## @param triggerer.autoscaling.vpa.controlledResources List of resources that the VPA can control. 
Defaults to cpu and memory + ## @param triggerer.autoscaling.vpa.maxAllowed VPA max allowed resources for the pod + ## @param triggerer.autoscaling.vpa.minAllowed VPA min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param triggerer.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param triggerer.autoscaling.hpa.enabled Enable HPA + ## @param triggerer.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param triggerer.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param triggerer.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param triggerer.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Airflow Triggerer Persistence Parameters + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes + ## + persistence: + ## @param triggerer.persistence.enabled Enable logs persistence using Persistent Volume Claims + ## + enabled: true + ## @param triggerer.persistence.storageClass Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param triggerer.persistence.annotations Additional Persistent Volume Claim annotations + ## + annotations: {} + ## @param triggerer.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param triggerer.persistence.size Size of logs volume + ## + size: 8Gi + ## @param triggerer.persistence.selector Selector to match an existing Persistent Volume for the Airflow Triggerer logs PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param triggerer.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param triggerer.persistence.existingClaim The name of an existing PVC to use for persistence (only if triggerer.replicaCount=1) + ## + existingClaim: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param triggerer.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param triggerer.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param triggerer.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## Airflow Triggerer Service + ## + service: + ## @param triggerer.service.type Airflow Triggerer service type + ## + type: ClusterIP + ## @param triggerer.service.ports.logs Airflow Triggerer service logs port + ## + ports: + logs: 8794 + ## Node ports to expose + ## @param triggerer.service.nodePorts.logs Node port for Airflow Triggerer service logs + ## NOTE: choose port between <30000-32767> + ## + 
nodePorts: + logs: "" + ## @param triggerer.service.clusterIP Airflow Triggerer service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param triggerer.service.loadBalancerIP Airflow Triggerer service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param triggerer.service.loadBalancerSourceRanges Airflow Triggerer service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param triggerer.service.externalTrafficPolicy Airflow Triggerer service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param triggerer.service.annotations Additional custom annotations for Airflow Triggerer service + ## + annotations: {} + ## @param triggerer.service.extraPorts Extra ports to expose in Airflow Triggerer service (normally used with the `triggerer.sidecars` value) + ## + extraPorts: [] + ## @param triggerer.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param triggerer.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Airflow Triggerer Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param triggerer.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param 
triggerer.networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports Airflow Triggerer is listening + ## on. When true, Airflow Triggerer will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param triggerer.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param triggerer.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param triggerer.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param triggerer.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param triggerer.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Airflow worker parameters +## +worker: + ## @param worker.command Override default container command (useful when using custom images) + ## + command: [] + ## @param worker.args Override default container args (useful when using custom images) + ## + args: [] + ## @param worker.extraEnvVars Array with extra environment variables to add Airflow worker pods + ## + extraEnvVars: [] + ## @param 
worker.extraEnvVarsCM ConfigMap containing extra environment variables for Airflow worker pods + ## + extraEnvVarsCM: "" + ## @param worker.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Airflow worker pods + ## + extraEnvVarsSecret: "" + ## @param worker.extraEnvVarsSecrets List of secrets with extra environment variables for Airflow worker pods + ## + extraEnvVarsSecrets: [] + ## @param worker.containerPorts.http Airflow worker HTTP container port + ## + containerPorts: + http: 8793 + ## @param worker.replicaCount Number of Airflow worker replicas + ## + replicaCount: 1 + ## Configure extra options for Airflow worker containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param worker.livenessProbe.enabled Enable livenessProbe on Airflow worker containers + ## @param worker.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param worker.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param worker.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param worker.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param worker.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 20 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param worker.readinessProbe.enabled Enable readinessProbe on Airflow worker containers + ## @param worker.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param worker.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param worker.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param worker.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param 
worker.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param worker.startupProbe.enabled Enable startupProbe on Airflow worker containers + ## @param worker.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param worker.startupProbe.periodSeconds Period seconds for startupProbe + ## @param worker.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param worker.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param worker.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param worker.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param worker.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param worker.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Airflow worker resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param worker.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if worker.resources is set (worker.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "large" + ## @param worker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Configure Airflow worker pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param worker.podSecurityContext.enabled Enabled Airflow worker pods' Security Context + ## @param worker.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param worker.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param worker.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param worker.podSecurityContext.fsGroup Set Airflow worker pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Airflow worker containers (only main one) Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param worker.containerSecurityContext.enabled Enabled Airflow worker containers' Security Context + ## @param worker.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param worker.containerSecurityContext.runAsUser Set Airflow worker containers' Security Context runAsUser + ## @param worker.containerSecurityContext.runAsGroup Set Airflow worker containers' Security Context runAsGroup + ## @param worker.containerSecurityContext.runAsNonRoot Set Airflow worker containers' Security Context runAsNonRoot + ## @param worker.containerSecurityContext.privileged 
Set worker container's Security Context privileged + ## @param worker.containerSecurityContext.allowPrivilegeEscalation Set worker container's Security Context allowPrivilegeEscalation + ## @param worker.containerSecurityContext.readOnlyRootFilesystem Set worker container's Security Context readOnlyRootFilesystem + ## @param worker.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param worker.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param worker.lifecycleHooks for the Airflow worker container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param worker.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param worker.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param worker.podLabels Add extra labels to the Airflow worker pods + ## + podLabels: {} + ## @param worker.podAnnotations Add extra annotations to the Airflow worker pods + ## + podAnnotations: {} + ## @param worker.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. 
topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param worker.affinity Affinity for Airflow worker pods assignment (evaluated as a template) + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: `worker.podAffinityPreset`, `worker.podAntiAffinityPreset`, and `worker.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param worker.nodeAffinityPreset.key Node label key to match. Ignored if `worker.affinity` is set. + ## @param worker.nodeAffinityPreset.type Node affinity preset type. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` + ## @param worker.nodeAffinityPreset.values Node label values to match. Ignored if `worker.affinity` is set. + ## + nodeAffinityPreset: + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + type: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param worker.nodeSelector Node labels for Airflow worker pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param worker.podAffinityPreset Pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param worker.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. 
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## @param worker.tolerations Tolerations for Airflow worker pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param worker.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param worker.priorityClassName Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param worker.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param worker.terminationGracePeriodSeconds Seconds Airflow worker pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param worker.podManagementPolicy Pod management policy for the worker statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: OrderedReady + ## @param worker.updateStrategy.type Airflow worker statefulset strategy type + ## @param worker.updateStrategy.rollingUpdate Airflow worker statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param worker.sidecars Add additional sidecar containers to the Airflow worker pods + ## Example: + ## sidecars: + ## - name: 
your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param worker.initContainers Add additional init containers to the Airflow worker pods + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param worker.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Airflow worker pods + ## + extraVolumeMounts: [] + ## @param worker.extraVolumes Optionally specify extra list of additional volumes for the Airflow worker pods + ## + extraVolumes: [] + ## @param worker.extraVolumeClaimTemplates Optionally specify extra list of volumesClaimTemplates for the Airflow worker statefulset + ## + extraVolumeClaimTemplates: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param worker.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param worker.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param worker.persistence.existingClaim Existing Persistent Volume Claim + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param worker.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `worker.persistence.selector` is set. 
+ ## + existingVolume: "" + ## @param worker.persistence.selector Configure custom selector for existing Persistent Volume. Overwrites `worker.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + ## + selector: {} + ## @param worker.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param worker.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param worker.persistence.size Persistent Volume Size + ## + size: 8Gi + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param worker.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param worker.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param worker.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## @param worker.podTemplate Template to replace the default one to be use when `executor=KubernetesExecutor` to create Airflow worker pods + ## + podTemplate: {} + ## Airflow worker Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## @param worker.pdb.create Deploy a pdb object for the Airflow worker pods + ## @param worker.pdb.minAvailable Maximum number/percentage of unavailable Airflow worker replicas + ## @param worker.pdb.maxUnavailable Maximum number/percentage of unavailable Airflow worker replicas + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param worker.autoscaling.enabled 
DEPRECATED: use worker.autoscaling.hpa.enabled instead + ## @param worker.autoscaling.minReplicas DEPRECATED: use worker.autoscaling.hpa.minReplicas instead + ## @param worker.autoscaling.maxReplicas DEPRECATED: use worker.autoscaling.hpa.maxReplicas instead + ## @param worker.autoscaling.targetMemory DEPRECATED: use worker.autoscaling.hpa.targetMemory instead + ## @param worker.autoscaling.targetCPU DEPRECATED: use worker.autoscaling.hpa.targetCPU instead + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## @param worker.autoscaling.vpa.enabled Enable VPA for Airflow Worker + ## @param worker.autoscaling.vpa.annotations Annotations for VPA resource + ## @param worker.autoscaling.vpa.controlledResources List of resources that the VPA can control. Defaults to cpu and memory + ## @param worker.autoscaling.vpa.maxAllowed VPA max allowed resources for the pod + ## @param worker.autoscaling.vpa.minAllowed VPA min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param worker.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updatePolicy: + updateMode: Auto + ## @param worker.autoscaling.hpa.enabled Enable HPA for Airflow Worker + ## @param worker.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param worker.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param worker.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param worker.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPU: 80 + targetMemory: 80 + ## Worker Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param worker.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param worker.networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports Worker is listening + ## on. When true, Worker will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param worker.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param worker.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param worker.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param worker.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param worker.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Airflow "setup-db" K8s Job parameters +## +setupDBJob: + ## @param setupDBJob.enabled Enable setting up the Airflow database using a K8s job (otherwise it's done by the Webserver on startup) + ## + enabled: true + ## @param setupDBJob.backoffLimit set backoff limit of the job + ## + backoffLimit: 10 + ## @param setupDBJob.command Override default container command on "setup-db" job's containers + ## + command: [] + ## @param setupDBJob.args Override default container args on "setup-db" job's containers + ## + args: [] + ## Configure "setup-db" job's container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param setupDBJob.containerSecurityContext.enabled Enabled "setup-db" job's containers' Security Context + ## @param setupDBJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in 
"setup-db" job's containers + ## @param setupDBJob.containerSecurityContext.runAsUser Set runAsUser in "setup-db" job's containers' Security Context + ## @param setupDBJob.containerSecurityContext.runAsGroup Set runAsUser in "setup-db" job's containers' Security Context + ## @param setupDBJob.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "setup-db" job's containers' Security Context + ## @param setupDBJob.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "setup-db" job's containers' Security Context + ## @param setupDBJob.containerSecurityContext.privileged Set privileged in "setup-db" job's containers' Security Context + ## @param setupDBJob.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "setup-db" job's containers' Security Context + ## @param setupDBJob.containerSecurityContext.capabilities.add List of capabilities to be added in "setup-db" job's containers + ## @param setupDBJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "setup-db" job's containers + ## @param setupDBJob.containerSecurityContext.seccompProfile.type Set seccomp profile in "setup-db" job's containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure "setup-db" job's pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param setupDBJob.podSecurityContext.enabled Enabled "setup-db" job's pods' Security Context + ## @param setupDBJob.podSecurityContext.fsGroupChangePolicy Set fsGroupChangePolicy in "setup-db" job's pods' Security Context + ## @param setupDBJob.podSecurityContext.sysctls List of sysctls to allow in "setup-db" job's pods' 
Security Context + ## @param setupDBJob.podSecurityContext.supplementalGroups List of supplemental groups to add to "setup-db" job's pods' Security Context + ## @param setupDBJob.podSecurityContext.fsGroup Set fsGroup in "setup-db" job's pods' Security Context + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## @param setupDBJob.extraEnvVars Array containing extra env vars to configure the Airflow "setup-db" job's container + ## + extraEnvVars: [] + ## @param setupDBJob.extraEnvVarsCM ConfigMap containing extra env vars to configure the Airflow "setup-db" job's container + ## + extraEnvVarsCM: "" + ## @param setupDBJob.extraEnvVarsSecret Secret containing extra env vars to configure the Airflow "setup-db" job's container (in case of sensitive data) + ## + extraEnvVarsSecret: "" + ## Airflow "setup-db" job's container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param setupDBJob.resourcesPreset Set Airflow "setup-db" job's container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if setupDBJob.resources is set (setupDBJob.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param setupDBJob.resources Set Airflow "setup-db" job's container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param setupDBJob.automountServiceAccountToken Mount Service Account token in Airflow "setup-db" job's pods + ## + automountServiceAccountToken: false + ## @param setupDBJob.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param setupDBJob.annotations [object] Add annotations to the Airflow "setup-db" job + ## + annotations: {} + ## @param setupDBJob.podLabels Additional pod labels for Airflow "setup-db" job + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param setupDBJob.podAnnotations Additional pod annotations for Airflow "setup-db" job + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param setupDBJob.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. 
topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param setupDBJob.affinity Affinity for Airflow setup-db pods assignment (evaluated as a template) + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: `setupDBJob.podAffinityPreset`, `setupDBJob.podAntiAffinityPreset`, and `setupDBJob.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param setupDBJob.nodeAffinityPreset.key Node label key to match. Ignored if `setupDBJob.affinity` is set. + ## @param setupDBJob.nodeAffinityPreset.type Node affinity preset type. Ignored if `setupDBJob.affinity` is set. Allowed values: `soft` or `hard` + ## @param setupDBJob.nodeAffinityPreset.values Node label values to match. Ignored if `setupDBJob.affinity` is set. + ## + nodeAffinityPreset: + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + type: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param setupDBJob.nodeSelector Node labels for Airflow setup-db pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param setupDBJob.podAffinityPreset Pod affinity preset. Ignored if `setupDBJob.affinity` is set. Allowed values: `soft` or `hard`. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param setupDBJob.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `setupDBJob.affinity` is set. Allowed values: `soft` or `hard`. 
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## @param setupDBJob.tolerations Tolerations for Airflow setup-db pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param setupDBJob.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param setupDBJob.priorityClassName Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param setupDBJob.schedulerName Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param setupDBJob.terminationGracePeriodSeconds Seconds Airflow setup-db pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param setupDBJob.extraVolumes Optionally specify extra list of additional volumes for Airflow "setup-db" job's pods + ## + extraVolumes: [] + ## @param setupDBJob.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Airflow "setup-db" job's containers + ## + extraVolumeMounts: [] + ## @param setupDBJob.initContainers Add additional init containers to the Airflow "setup-db" job's pods + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + +## @section Airflow ldap parameters + +## LDAP configuration +## @param 
ldap.enabled Enable LDAP authentication +## @param ldap.uri Server URI, eg. ldap://ldap_server:389 +## @param ldap.basedn Base of the search, eg. ou=example,o=org. +## @param ldap.searchAttribute if doing an indirect bind to ldap, this is the field that matches the username when searching for the account to bind to +## @param ldap.binddn DN of the account used to search in the LDAP server. +## @param ldap.bindpw Bind Password +## @param ldap.existingSecret Name of an existing secret containing the LDAP bind password +## @param ldap.userRegistration Set to True to enable user self registration +## @param ldap.userRegistrationRole Set role name to be assigned when a user registers himself. This role must already exist. Mandatory when using ldap.userRegistration +## @param ldap.rolesMapping mapping from LDAP DN to a list of roles +## @param ldap.rolesSyncAtLogin replace ALL the user's roles each login, or only on registration +## +ldap: + enabled: false + uri: "ldap://ldap_server:389" + basedn: "dc=example,dc=org" + searchAttribute: "cn" + binddn: "cn=admin,dc=example,dc=org" + bindpw: "" + existingSecret: "" + userRegistration: 'True' + userRegistrationRole: "Public" + rolesMapping: '{ "cn=All,ou=Groups,dc=example,dc=org": ["User"], "cn=Admins,ou=Groups,dc=example,dc=org": ["Admin"], }' + rolesSyncAtLogin: 'True' + ## SSL/TLS parameters for LDAP + ## @param ldap.tls.enabled Enable TLS/SSL for LDAP, you must include the CA file. + ## @param ldap.tls.allowSelfSigned Allow to use self signed certificates + ## @param ldap.tls.certificatesSecret Name of the existing secret containing the certificate CA file that will be used by ldap client + ## @param ldap.tls.certificatesMountPath Where LDAP certificates are mounted. 
+ ## @param ldap.tls.CAFilename LDAP CA cert filename + ## + tls: + enabled: false + allowSelfSigned: true + certificatesSecret: "" + certificatesMountPath: /opt/drycc/airflow/conf/certs + CAFilename: "" +## @section Traffic Exposure Parameters + +## Airflow service parameters +## +service: + ## @param service.type Airflow service type + ## + type: ClusterIP + ## @param service.ports.http Airflow service HTTP port + ## + ports: + http: 80 + ## Node ports to expose + ## @param service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP Airflow service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Airflow service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Airflow service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Airflow service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Airflow service + ## + annotations: {} 
+ ## @param service.extraPorts Extra ports to expose on Airflow service + ## + extraPorts: [] +## Airflow ingress parameters +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress record generation for Airflow + ## + enabled: false + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.hostname Default host for the ingress record + ## + hostname: airflow.local + ## @param ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param ingress.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: airflow.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - airflow.local + ## secretName: airflow.local-tls + ## + extraTls: [] + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will 
create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: airflow.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + +## @section Other Parameters + +## Service account for Airflow pods to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for Airflow pods + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Role Based Access +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## @param rbac.create Create Role and RoleBinding +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] + +## @section StatsD metrics parameters +## +metrics: + ## @param metrics.enabled Enable a StatsD exporter that collects StatsD metrics from Airflow components and expose them as Prometheus metrics + ## + enabled: false + ## Bitnami StatsD exporter image + ## ref: https://hub.docker.com/r/bitnami/statsd-exporter/tags/ + ## @param metrics.image.registry [default: REGISTRY_NAME] StatsD exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/statsd-exporter] StatsD exporter image repository + ## @skip metrics.image.tag StatsD exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest StatsD exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy StatsD exporter image pull policy + ## @param metrics.image.pullSecrets StatsD exporter image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc-addons/statsd-exporter + tag: "0" + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.configuration Specify content for StatsD exporter's mappings.yml + ## + configuration: "" + ## @param metrics.existingConfigmap Name of an existing config map containing the StatsD exporter's mappings.yml + ## + existingConfigmap: "" + ## @param metrics.containerPorts.ingest StatsD exporter ingest container port (used for the metrics ingestion from Airflow components) + ## @param metrics.containerPorts.metrics StatsD exporter metrics container port (used to expose Prometheus metrics) + ## + containerPorts: + ingest: 9125 + metrics: 9102 + ## StatsD exporter resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## StatsD exporter pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.podSecurityContext.enabled Enable security context for the pods + ## @param metrics.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param metrics.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param metrics.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param metrics.podSecurityContext.fsGroup Set StatsD exporter pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## StatsD exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable StatsD exporter containers' Security Context + ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param metrics.containerSecurityContext.runAsUser Set StatsD exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set StatsD exporter containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set StatsD exporter containers' Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set StatsD exporter 
containers' Security Context privileged + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set StatsD exporter containers' Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set StatsD exporter containers' Security Context readOnlyRootFilesystem + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set containers' Security Context seccomp profile + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure extra options for StatsD exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on StatsD exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 20 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on StatsD exporter containers + ## @param 
metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on StatsD exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.lifecycleHooks for the StatsD exporter containers' to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param metrics.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param metrics.hostAliases StatsD exporter pods host aliases + ## 
https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param metrics.podLabels Extra labels for StatsD exporter pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations Extra annotations for StatsD exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param metrics.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param metrics.podAffinityPreset Pod affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param metrics.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node metrics.affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param metrics.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param metrics.nodeAffinityPreset.key Node label key to match Ignored if `metrics.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param metrics.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.affinity Affinity for StatsD exporter pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: metrics.podAffinityPreset, metrics.podAntiAffinityPreset, and metrics.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param metrics.nodeSelector Node labels for StatsD exporter pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param metrics.priorityClassName StatsD exporter pods' priorityClassName + ## + priorityClassName: "" + ## @param metrics.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param metrics.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param metrics.schedulerName Name of the k8s scheduler (other than default) for StatsD exporter + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.terminationGracePeriodSeconds Seconds StatsD exporter pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the StatsD exporter pods + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the StatsD exporter containers + ## + extraVolumeMounts: [] + ## StatsD metrics service configuration + ## + service: + ## @param 
metrics.service.ports.ingest StatsD exporter ingest service port (used for the metrics ingestion from Airflow components) + ## @param metrics.service.ports.metrics StatsD exporter metrics service port (used to expose Prometheus metrics) + ## + ports: + ingest: 9125 + metrics: 9102 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for the StatsD metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.enabled` to be `true`) + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: 
https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## Metrics Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param metrics.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param metrics.networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports Metrics is listening + ## on. When true, Metrics will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param metrics.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param metrics.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param metrics.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param metrics.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param metrics.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Airflow database parameters + +## PostgreSQL chart configuration +## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml +## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart +## @param postgresql.auth.enablePostgresUser Assign a password to the "postgres" admin user. 
Otherwise, remote access will be blocked for this user +## @param postgresql.auth.username Name for a custom user to create +## @param postgresql.auth.password Password for the custom user to create +## @param postgresql.auth.database Name for a custom database to create +## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials +## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`) +## +postgresql: + enabled: false + auth: + enablePostgresUser: true + username: bn_airflow + password: "" + database: bitnami_airflow + existingSecret: "" + architecture: standalone + primary: + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param postgresql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param postgresql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} +## External PostgreSQL configuration +## All of these values are only used when postgresql.enabled is set to false +## @param externalDatabase.host Database host (ignored if externalDatabase.sqlConnection is set) +## @param externalDatabase.port Database port number (ignored if externalDatabase.sqlConnection is set) +## @param externalDatabase.user Non-root username for Airflow (ignored if externalDatabase.sqlConnection is set) +## @param externalDatabase.password Password for the non-root username for Airflow (ignored if externalDatabase.sqlConnection or externalDatabase.existingSecret are set) +## @param externalDatabase.database Airflow database name (ignored if externalDatabase.sqlConnection is set) +## @param externalDatabase.sqlConnection SQL connection string +## @param externalDatabase.existingSecret Name of an existing secret resource containing the database credentials +## @param externalDatabase.existingSecretPasswordKey Name of an existing secret key containing the database credentials (ignored if externalDatabase.existingSecretSqlConnectionKey is set) +## @param externalDatabase.existingSecretSqlConnectionKey Name of an existing secret key containing the SQL connection string +## +externalDatabase: + host: localhost + port: 5432 + user: bn_airflow + database: drycc_airflow + password: "" + sqlConnection: "" + existingSecret: "" + existingSecretPasswordKey: "" + existingSecretSqlConnectionKey: "" +## Redis® chart configuration +## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml +## @param 
redis.enabled Switch to enable or disable the Redis® helm chart +## @param redis.auth.enabled Enable password authentication +## @param redis.auth.password Redis® password +## @param redis.auth.existingSecret The name of an existing secret with Redis® credentials +## @param redis.architecture Redis® architecture. Allowed values: `standalone` or `replication` +## +redis: + enabled: false + auth: + enabled: true + ## Redis® password (both master and slave). Defaults to a random 10-character alphanumeric string if not set and auth.enabled is true. + ## It should always be set using the password value or in the existingSecret to avoid issues + ## with Airflow. + ## The password value is ignored if existingSecret is set + password: "" + existingSecret: "" + architecture: standalone + master: + ## @param redis.master.service.ports.redis Redis® port + ## + service: + ports: + redis: 6379 + ## Redis® master resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param redis.master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param redis.master.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + +## External broker url; configuration +# redis://[USER:PASSWORD@]REDIS_ADDRESS[:PORT][/VIRTUALHOST] +celeryBrokerUrl: "" + +# Example: ‘{{ "master_name": "mycluster" }, "sentinel_kwargs": { "password": "password" }}’ +celeryBrokerTransportOption: '' diff --git a/addons/airflow/3/meta.yaml b/addons/airflow/3/meta.yaml new file mode 100644 index 00000000..0ed38dc8 --- /dev/null +++ b/addons/airflow/3/meta.yaml @@ -0,0 +1,111 @@ +name: airflow-3 +version: "3" +id: 3c126f34-31aa-4a29-8628-bb7a0da15498 +description: "airflow-3" +displayName: "airflow-3" +metadata: + displayName: "airflow-3" + provider: + name: drycc + supportURL: http://airflow.apache.org/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/airflow +tags: airflow +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "auth.password" + required: false + description: "auth.password config for values.yaml" +- name: "auth.fernetKey" + required: false + description: "auth.fernetKey config for values.yaml" +- name: "dags" + required: false + description: "dags config for values.yaml" +- name: "plugins" + required: false + description: "plugins config for values.yaml" +- name: "configuration" + required: false + description: "configuration config for values.yaml" +- name: "extraEnvVars" + required: false + description: "extraEnvVars config for values.yaml" +- name: "image" + required: false + description: "image config for values.yaml" +- name: "web.baseUrl" + required: 
false + description: "web.baseUrl config for values.yaml" +- name: "web.nodeSelector" + required: false + description: "web nodeSelector config for values.yaml" +- name: "web.extraEnvVars" + required: false + description: "web extraEnvVars config for values.yaml" +- name: "web.podSecurityContext.containerSecurityContext" + required: false + description: "web.podSecurityContext.containerSecurityContext config for values.yaml" +- name: "scheduler.nodeSelector" + required: false + description: "scheduler nodeSelector config for values.yaml" +- name: "scheduler.extraEnvVars" + required: false + description: "scheduler extraEnvVars config for values.yaml" +- name: "scheduler.podSecurityContext.containerSecurityContext" + required: false + description: "scheduler.podSecurityContext.containerSecurityContext config for values.yaml" +- name: "worker.nodeSelector" + required: false + description: "worker nodeSelector config for values.yaml" +- name: "worker.extraEnvVars" + required: false + description: "worker extraEnvVars config for values.yaml" +- name: "worker.persistence" + required: false + description: "worker persistence config for values.yaml" +- name: "worker.podSecurityContext.containerSecurityContext" + required: false + description: "worker.podSecurityContext.containerSecurityContext config for values.yaml" +- name: "triggerer.nodeSelector" + required: false + description: "triggerer nodeSelector config for values.yaml" +- name: "triggerer.extraEnvVars" + required: false + description: "triggerer extraEnvVars config for values.yaml" +- name: "triggerer.persistence" + required: false + description: "triggerer persistence config for values.yaml" +- name: "dagProcessor.extraEnvVars" + required: false + description: "dagProcessor extraEnvVars config for values.yaml" +- name: "dagProcessor.nodeSelector" + required: false + description: "dagProcessor nodeSelector config for values.yaml" +- name: "service.type" + required: false + description: "service type config for 
values.yaml" +- name: "metrics.enabled" + required: false + description: "metrics enabled or not config for values.yaml" +- name: "metrics.nodeSelector" + required: false + description: "metrics nodeSelector config for values.yaml" +- name: pip + required: false + description: "pip config for values.yaml" +- name: requirements + required: false + description: "requirements config for values.yaml" +- name: "externalDatabase" + required: true + description: "externalDatabase config for values.yaml" +- name: celeryBrokerUrl + required: true + description: "celeryBrokerUrl config for values.yaml" +- name: celeryBrokerTransportOption + required: false + description: "celeryBrokerTransportOption config for values.yaml" +archive: false diff --git a/addons/airflow/3/plans/standard-16c48g2w/bind.yaml b/addons/airflow/3/plans/standard-16c48g2w/bind.yaml new file mode 100644 index 00000000..7003a652 --- /dev/null +++ b/addons/airflow/3/plans/standard-16c48g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/3/plans/standard-16c48g2w/create-instance-schema.json b/addons/airflow/3/plans/standard-16c48g2w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/airflow/3/plans/standard-16c48g2w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-16c48g2w/meta.yaml b/addons/airflow/3/plans/standard-16c48g2w/meta.yaml new file mode 100644 index 00000000..6e873906 --- /dev/null +++ b/addons/airflow/3/plans/standard-16c48g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c48g2w" +id: 6343151a-a925-4f68-aab1-54f83dcacfb2 +description: "airflow standard-16c48g2w plan which limit resources 2 workers per worker 16 cores memory size 48Gi." 
+displayName: "standard-16c48g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/airflow/3/plans/standard-16c48g2w/values.yaml b/addons/airflow/3/plans/standard-16c48g2w/values.yaml new file mode 100644 index 00000000..a30c0bd9 --- /dev/null +++ b/addons/airflow/3/plans/standard-16c48g2w/values.yaml @@ -0,0 +1,53 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-16c48g2w + +web: + replicaCount: 1 + resources: + limits: + cpu: 16 + memory: 48Gi + requests: + cpu: 2 + memory: 6Gi + +scheduler: + replicaCount: 1 + resources: + limits: + cpu: 16 + memory: 48Gi + requests: + cpu: 2 + memory: 6Gi + +worker: + replicaCount: 2 + resources: + limits: + cpu: 16 + memory: 48Gi + requests: + cpu: 2 + memory: 6Gi + +dagProcessor: + replicaCount: 1 + resources: + limits: + cpu: 16 + memory: 48Gi + requests: + cpu: 2 + memory: 6Gi + +triggerer: + replicaCount: 1 + resources: + limits: + cpu: 16 + memory: 48Gi + requests: + cpu: 2 + memory: 6Gi \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-1c2g2w/bind.yaml b/addons/airflow/3/plans/standard-1c2g2w/bind.yaml new file mode 100644 index 00000000..7003a652 --- /dev/null +++ b/addons/airflow/3/plans/standard-1c2g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/3/plans/standard-1c2g2w/create-instance-schema.json b/addons/airflow/3/plans/standard-1c2g2w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/airflow/3/plans/standard-1c2g2w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-1c2g2w/meta.yaml b/addons/airflow/3/plans/standard-1c2g2w/meta.yaml new file mode 100644 index 00000000..6ab19166 --- /dev/null +++ b/addons/airflow/3/plans/standard-1c2g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c2g2w" +id: b2f776dc-9434-4d00-a9e0-da7f18f91a17 +description: "airflow standard-1c2g2w plan which limit resources 2 workers per worker 1 core memory size 2Gi." 
+displayName: "standard-1c2g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/airflow/3/plans/standard-1c2g2w/values.yaml b/addons/airflow/3/plans/standard-1c2g2w/values.yaml new file mode 100644 index 00000000..67b9b46c --- /dev/null +++ b/addons/airflow/3/plans/standard-1c2g2w/values.yaml @@ -0,0 +1,53 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-1c2g2w + +web: + replicaCount: 1 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + +scheduler: + replicaCount: 1 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + +worker: + replicaCount: 2 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + +dagProcessor: + replicaCount: 1 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + +triggerer: + replicaCount: 1 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi diff --git a/addons/airflow/3/plans/standard-24c64g7w/bind.yaml b/addons/airflow/3/plans/standard-24c64g7w/bind.yaml new file mode 100644 index 00000000..7003a652 --- /dev/null +++ b/addons/airflow/3/plans/standard-24c64g7w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/3/plans/standard-24c64g7w/create-instance-schema.json b/addons/airflow/3/plans/standard-24c64g7w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/airflow/3/plans/standard-24c64g7w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-24c64g7w/meta.yaml b/addons/airflow/3/plans/standard-24c64g7w/meta.yaml new file mode 100644 index 00000000..f190be55 --- /dev/null +++ b/addons/airflow/3/plans/standard-24c64g7w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-24c64g7w" +id: d657f880-0282-414a-82f4-078ba98a01f4 +description: "airflow standard-24c64g7w plan which limit resources 7 workers per worker 24 cores memory size 64Gi." 
+displayName: "standard-24c64g7w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/airflow/3/plans/standard-24c64g7w/values.yaml b/addons/airflow/3/plans/standard-24c64g7w/values.yaml new file mode 100644 index 00000000..792cb09f --- /dev/null +++ b/addons/airflow/3/plans/standard-24c64g7w/values.yaml @@ -0,0 +1,53 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-24c64g7w + +web: + replicaCount: 1 + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi + +scheduler: + replicaCount: 1 + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi + +worker: + replicaCount: 7 + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi + +dagProcessor: + replicaCount: 1 + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi + +triggerer: + replicaCount: 1 + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-2c4g2w/bind.yaml b/addons/airflow/3/plans/standard-2c4g2w/bind.yaml new file mode 100644 index 00000000..7003a652 --- /dev/null +++ b/addons/airflow/3/plans/standard-2c4g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/3/plans/standard-2c4g2w/create-instance-schema.json b/addons/airflow/3/plans/standard-2c4g2w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/airflow/3/plans/standard-2c4g2w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-2c4g2w/meta.yaml b/addons/airflow/3/plans/standard-2c4g2w/meta.yaml new file mode 100644 index 00000000..c7a74242 --- /dev/null +++ b/addons/airflow/3/plans/standard-2c4g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g2w" +id: 78ef5a9a-6eb5-43f2-83b3-577988cd6bc1 +description: "airflow standard-2c4g2w plan which limit resources 2 workers per worker 2 cores memory size 4Gi." 
+displayName: "standard-2c4g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/airflow/3/plans/standard-2c4g2w/values.yaml b/addons/airflow/3/plans/standard-2c4g2w/values.yaml new file mode 100644 index 00000000..2a4f94e4 --- /dev/null +++ b/addons/airflow/3/plans/standard-2c4g2w/values.yaml @@ -0,0 +1,53 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-2c4g2w + +web: + replicaCount: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi + +scheduler: + replicaCount: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi + +worker: + replicaCount: 2 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi + +dagProcessor: + replicaCount: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi + +triggerer: + replicaCount: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi diff --git a/addons/airflow/3/plans/standard-4c16g2w/bind.yaml b/addons/airflow/3/plans/standard-4c16g2w/bind.yaml new file mode 100644 index 00000000..7003a652 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c16g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/3/plans/standard-4c16g2w/create-instance-schema.json b/addons/airflow/3/plans/standard-4c16g2w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c16g2w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-4c16g2w/meta.yaml b/addons/airflow/3/plans/standard-4c16g2w/meta.yaml new file mode 100644 index 00000000..afec4612 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c16g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c16g2w" +id: 3eb0d2e1-66bc-4af5-b531-cbe31d740a5c +description: "airflow standard-4c16g2w plan which limit resources 2 workers per worker 4 cores memory size 16Gi." 
+displayName: "standard-4c16g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/airflow/3/plans/standard-4c16g2w/values.yaml b/addons/airflow/3/plans/standard-4c16g2w/values.yaml new file mode 100644 index 00000000..4c7ca071 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c16g2w/values.yaml @@ -0,0 +1,53 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-4c16g2w + +web: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 500m + memory: 2Gi + +scheduler: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 500m + memory: 2Gi + +worker: + replicaCount: 2 + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 500m + memory: 2Gi + +dagProcessor: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 500m + memory: 2Gi + +triggerer: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 500m + memory: 2Gi \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-4c8g2w/bind.yaml b/addons/airflow/3/plans/standard-4c8g2w/bind.yaml new file mode 100644 index 00000000..7003a652 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c8g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/3/plans/standard-4c8g2w/create-instance-schema.json b/addons/airflow/3/plans/standard-4c8g2w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c8g2w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-4c8g2w/meta.yaml b/addons/airflow/3/plans/standard-4c8g2w/meta.yaml new file mode 100644 index 00000000..0f6ce6f3 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c8g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g2w" +id: 0fcc23aa-44f8-4fb0-ab6f-d2c96bbacdc5 +description: "airflow standard-4c8g2w plan which limit resources 2 workers per worker 4 cores memory size 8Gi." 
+displayName: "standard-4c8g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/airflow/3/plans/standard-4c8g2w/values.yaml b/addons/airflow/3/plans/standard-4c8g2w/values.yaml new file mode 100644 index 00000000..134873c5 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c8g2w/values.yaml @@ -0,0 +1,53 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-4c8g2w + +web: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + +scheduler: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + +worker: + replicaCount: 2 + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + +dagProcessor: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + +triggerer: + replicaCount: 1 + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi diff --git a/addons/airflow/3/plans/standard-8c32g2w/bind.yaml b/addons/airflow/3/plans/standard-8c32g2w/bind.yaml new file mode 100644 index 00000000..7003a652 --- /dev/null +++ b/addons/airflow/3/plans/standard-8c32g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "airflow.web.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/3/plans/standard-8c32g2w/create-instance-schema.json b/addons/airflow/3/plans/standard-8c32g2w/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/airflow/3/plans/standard-8c32g2w/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-8c32g2w/meta.yaml b/addons/airflow/3/plans/standard-8c32g2w/meta.yaml new file mode 100644 index 00000000..52e80451 --- /dev/null +++ b/addons/airflow/3/plans/standard-8c32g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g2w" +id: 019e7635-cbc5-432f-9559-aeed8fa3a4d4 +description: "airflow standard-8c32g2w plan which limit resources 2 workers per worker 8 cores memory size 32Gi." 
+displayName: "standard-8c32g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/airflow/3/plans/standard-8c32g2w/values.yaml b/addons/airflow/3/plans/standard-8c32g2w/values.yaml new file mode 100644 index 00000000..d003d29d --- /dev/null +++ b/addons/airflow/3/plans/standard-8c32g2w/values.yaml @@ -0,0 +1,53 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-8c32g2w + +web: + replicaCount: 1 + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 1 + memory: 4Gi + +scheduler: + replicaCount: 1 + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 1 + memory: 4Gi + +worker: + replicaCount: 2 + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 1 + memory: 4Gi + +dagProcessor: + replicaCount: 1 + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 1 + memory: 4Gi + +triggerer: + replicaCount: 1 + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 1 + memory: 4Gi \ No newline at end of file diff --git a/addons/index.yaml b/addons/index.yaml index 8aec83b4..a34e0b33 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -3,6 +3,8 @@ entries: airflow: - version: 2 description: "Apache Airflow is a platform that allows you to create, schedule and monitor workflows using Python, Jinja and a message queue" + - version: 3 + description: "Apache Airflow is a platform that allows you to create, schedule and monitor workflows using Python, Jinja and a message queue" cloudbeaver: - version: 23 description: "Cloud Database Manager." 
From ab1e4ed5241b06837d53c656f789c9b40eaa4857 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 3 Jun 2025 10:28:02 +0800 Subject: [PATCH 53/93] chore(airflow-3): worker mount volume --- .../airflow/3/chart/airflow-3/templates/worker/statefulset.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml index f1111160..44fb1376 100644 --- a/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml +++ b/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml @@ -221,6 +221,8 @@ spec: - name: empty-dir mountPath: /opt/drycc/airflow/webserver_config.py subPath: app-base-dir/webserver_config.py + - name: data + mountPath: /data {{- if .Values.requirements }} - name: req-config mountPath: /drycc/python/requirements.txt From 478d09c5dd1075267aaded0a32c6e4a6fe3e8559 Mon Sep 17 00:00:00 2001 From: Eamon Date: Wed, 4 Jun 2025 11:40:47 +0800 Subject: [PATCH 54/93] fix(mysql-cluster): fix param group_replication_meesage_cache_size (#105) --- addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml index 5c34d122..20775ba1 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml @@ -25,7 +25,7 @@ primary: innodb_write_io_threads=4 innodb_buffer_pool_instances=2 innodb_buffer_pool_size=2147483648 - group_replication_message_cache_size=536870912 + loose-group_replication_message_cache_size=536870912 max_connections=1000 resources: limits: From 52510e2e21e892a15d3bd03a61a66f57f55d9f7f Mon Sep 17 00:00:00 2001 From: Eamon Date: Fri, 6 Jun 2025 13:05:30 +0800 Subject: [PATCH 55/93] chore(fluentbit): adjust input memory limit (#106) --- 
addons/fluentbit/2/chart/fluentbit/values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/addons/fluentbit/2/chart/fluentbit/values.yaml b/addons/fluentbit/2/chart/fluentbit/values.yaml index d665a248..52f9d738 100644 --- a/addons/fluentbit/2/chart/fluentbit/values.yaml +++ b/addons/fluentbit/2/chart/fluentbit/values.yaml @@ -189,6 +189,7 @@ daemonset: Tag_Regex (?[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?[^_]+)_(?.+)-(?[a-z0-9]{64})\.log$ Read_from_Head false multiline.parser docker,cri + Mem_Buf_Limit 30M ## https://docs.fluentbit.io/manual/pipeline/filters filters_base: | From b35f17975a1c5fa8a0ef642edb77da8c1b097744 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 9 Jun 2025 16:26:20 +0800 Subject: [PATCH 56/93] chore(airflow): job rm hook cause helm install wait --- addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml | 3 --- addons/airflow/3/chart/airflow-3/values.yaml | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml b/addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml index 2bff7cc8..5abf5413 100644 --- a/addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml +++ b/addons/airflow/3/chart/airflow-3/templates/setup-db-job.yaml @@ -11,9 +11,6 @@ metadata: namespace: {{ include "common.names.namespace" . | quote }} labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} app.kubernetes.io/component: setup-db - {{- $defaultAnnotations := dict "helm.sh/hook" "post-install,post-upgrade" "helm.sh/hook-delete-policy" "before-hook-creation,hook-succeeded" }} - {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.setupDBJob.annotations .Values.commonAnnotations $defaultAnnotations ) "context" . 
) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} spec: backoffLimit: {{ .Values.setupDBJob.backoffLimit }} template: diff --git a/addons/airflow/3/chart/airflow-3/values.yaml b/addons/airflow/3/chart/airflow-3/values.yaml index f0e7d960..58b82dcc 100644 --- a/addons/airflow/3/chart/airflow-3/values.yaml +++ b/addons/airflow/3/chart/airflow-3/values.yaml @@ -111,7 +111,7 @@ image: pullSecrets: [] ## Set to true if you would like to see extra information on logs ## - debug: true + debug: false ## Authentication parameters ## ref: https://github.com/bitnami/containers/tree/main/bitnami/airflow#environment-variables From 6c5b6ff62efd341cbc4b3fa55f306b9467a81f1d Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 10 Jun 2025 17:09:45 +0800 Subject: [PATCH 57/93] chore(mongodb): Optimize mongodb --- .../7.0/chart/mongodb/templates/replicaset/statefulset.yaml | 2 +- addons/mongodb/7.0/chart/mongodb/values.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml b/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml index c45daffe..eddaae09 100644 --- a/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml +++ b/addons/mongodb/7.0/chart/mongodb/templates/replicaset/statefulset.yaml @@ -403,7 +403,7 @@ spec: {{- else }} args: - | - mongodb_exporter --collect-all --compatible-mode --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }} + mongodb_exporter --collector.replicasetstatus --collector.dbstats --collector.indexstats --collector.collstats --compatible-mode --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . 
}}" {{ .Values.metrics.extraFlags }} {{- end }} env: {{- if .Values.auth.enabled }} diff --git a/addons/mongodb/7.0/chart/mongodb/values.yaml b/addons/mongodb/7.0/chart/mongodb/values.yaml index ae599f98..ac06f422 100644 --- a/addons/mongodb/7.0/chart/mongodb/values.yaml +++ b/addons/mongodb/7.0/chart/mongodb/values.yaml @@ -1912,7 +1912,7 @@ metrics: ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe ## livenessProbe: - enabled: true + enabled: false initialDelaySeconds: 60 periodSeconds: 30 timeoutSeconds: 5 @@ -1928,7 +1928,7 @@ metrics: ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe ## readinessProbe: - enabled: true + enabled: false initialDelaySeconds: 60 periodSeconds: 30 timeoutSeconds: 5 From 75fcf3fcb63b4e07491136b5494bf1afc5fbcc90 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 10 Jun 2025 17:41:15 +0800 Subject: [PATCH 58/93] feat(opensearch): add version 3.0 --- addons/index.yaml | 2 + .../2.10/plans/standard-1c2g16/meta.yaml | 6 - .../3.0/chart/opensearch-3.0/Chart.yaml | 24 + .../3.0/chart/opensearch-3.0/README.md | 1153 +++++ .../chart/opensearch-3.0/templates/NOTES.txt | 126 + .../opensearch-3.0/templates/_helpers.tpl | 859 ++++ .../opensearch-3.0/templates/configmap.yaml | 25 + .../templates/coordinating/hpa.yaml | 41 + .../templates/coordinating/metrics-svc.yaml | 31 + .../templates/coordinating/networkpolicy.yaml | 91 + .../templates/coordinating/pdb.yaml | 28 + .../coordinating/serviceaccount.yaml | 19 + .../coordinating/servicemonitor.yaml | 52 + .../templates/coordinating/statefulset.yaml | 347 ++ .../templates/coordinating/svc-headless.yaml | 35 + .../templates/coordinating/vpa.yaml | 45 + .../templates/dashboards/deployment.yaml | 239 ++ .../templates/dashboards/hpa.yaml | 41 + .../dashboards/ingress-tls-secret.yaml | 46 + .../templates/dashboards/ingress.yaml | 58 + .../templates/dashboards/networkpolicy.yaml | 93 + .../templates/dashboards/pdb.yaml | 28 + 
.../templates/dashboards/pvc.yaml | 35 + .../templates/dashboards/service.yaml | 54 + .../templates/dashboards/serviceaccount.yaml | 19 + .../templates/dashboards/tls-secret.yaml | 28 + .../templates/dashboards/vpa.yaml | 45 + .../opensearch-3.0/templates/data/hpa.yaml | 41 + .../templates/data/metrics-svc.yaml | 32 + .../templates/data/networkpolicy.yaml | 90 + .../opensearch-3.0/templates/data/pdb.yaml | 28 + .../templates/data/serviceaccount.yaml | 19 + .../templates/data/servicemonitor.yaml | 52 + .../templates/data/statefulset.yaml | 413 ++ .../templates/data/svc-headless.yaml | 35 + .../opensearch-3.0/templates/data/vpa.yaml | 45 + .../opensearch-3.0/templates/extra-list.yaml | 9 + .../opensearch-3.0/templates/ingest/hpa.yaml | 41 + .../templates/ingest/ingress.yaml | 58 + .../templates/ingest/metrics-svc.yaml | 32 + .../templates/ingest/networkpolicy.yaml | 88 + .../opensearch-3.0/templates/ingest/pdb.yaml | 28 + .../templates/ingest/service.yaml | 60 + .../templates/ingest/serviceaccount.yaml | 19 + .../templates/ingest/servicemonitor.yaml | 52 + .../templates/ingest/statefulset.yaml | 347 ++ .../templates/ingest/svc-headless.yaml | 35 + .../opensearch-3.0/templates/ingest/vpa.yaml | 45 + .../templates/ingress-tls-secrets.yaml | 87 + .../opensearch-3.0/templates/ingress.yaml | 58 + .../templates/initialization-configmap.yaml | 18 + .../opensearch-3.0/templates/master/hpa.yaml | 41 + .../templates/master/metrics-svc.yaml | 32 + .../templates/master/networkpolicy.yaml | 88 + .../opensearch-3.0/templates/master/pdb.yaml | 28 + .../templates/master/serviceaccount.yaml | 19 + .../templates/master/servicemonitor.yaml | 52 + .../templates/master/statefulset.yaml | 425 ++ .../templates/master/svc-headless.yaml | 35 + .../opensearch-3.0/templates/master/vpa.yaml | 45 + .../opensearch-3.0/templates/secrets.yaml | 44 + .../opensearch-3.0/templates/service.yaml | 65 + .../templates/snapshots/init-job.yaml | 179 + .../templates/snapshots/networkpolicy.yaml | 50 + 
.../templates/snapshots/policies-cm.yaml | 28 + .../templates/snapshots/pvc.yaml | 37 + .../templates/snapshots/repos-cm.yaml | 29 + .../opensearch-3.0/templates/tls-secret.yaml | 126 + .../3.0/chart/opensearch-3.0/values.yaml | 3714 +++++++++++++++++ addons/opensearch/3.0/meta.yaml | 66 + .../plans/standard-2c4g32}/bind.yaml | 0 .../create-instance-schema.json | 0 .../3.0/plans/standard-2c4g32/meta.yaml | 6 + .../plans/standard-2c4g32}/values.yaml | 18 +- .../3.0/plans/standard-2c4g64/bind.yaml | 123 + .../create-instance-schema.json | 12 + .../3.0/plans/standard-2c4g64/meta.yaml | 6 + .../3.0/plans/standard-2c4g64/values.yaml | 157 + .../3.0/plans/standard-4c16g256/bind.yaml | 123 + .../create-instance-schema.json | 12 + .../3.0/plans/standard-4c16g256/meta.yaml | 7 + .../3.0/plans/standard-4c16g256/values.yaml | 157 + .../3.0/plans/standard-4c16g512/bind.yaml | 123 + .../create-instance-schema.json | 12 + .../3.0/plans/standard-4c16g512/meta.yaml | 6 + .../3.0/plans/standard-4c16g512/values.yaml | 157 + .../3.0/plans/standard-4c8g128/bind.yaml | 123 + .../create-instance-schema.json | 12 + .../3.0/plans/standard-4c8g128/meta.yaml | 6 + .../3.0/plans/standard-4c8g128/values.yaml | 157 + .../3.0/plans/standard-8c32g1024/bind.yaml | 123 + .../create-instance-schema.json | 12 + .../3.0/plans/standard-8c32g1024/meta.yaml | 6 + .../3.0/plans/standard-8c32g1024/values.yaml | 157 + .../3.0/plans/standard-8c32g2048/bind.yaml | 123 + .../create-instance-schema.json | 12 + .../3.0/plans/standard-8c32g2048/meta.yaml | 6 + .../3.0/plans/standard-8c32g2048/values.yaml | 157 + .../3.0/plans/standard-8c32g768/bind.yaml | 123 + .../create-instance-schema.json | 12 + .../3.0/plans/standard-8c32g768/meta.yaml | 6 + .../3.0/plans/standard-8c32g768/values.yaml | 157 + 102 files changed, 12501 insertions(+), 15 deletions(-) delete mode 100644 addons/opensearch/2.10/plans/standard-1c2g16/meta.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/Chart.yaml create mode 
100644 addons/opensearch/3.0/chart/opensearch-3.0/README.md create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/NOTES.txt create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/_helpers.tpl create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/configmap.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/hpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/metrics-svc.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/networkpolicy.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/pdb.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/serviceaccount.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/servicemonitor.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/statefulset.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/svc-headless.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/vpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/deployment.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/hpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress-tls-secret.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/networkpolicy.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pdb.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pvc.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/service.yaml create 
mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/serviceaccount.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/tls-secret.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/vpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/hpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/metrics-svc.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/networkpolicy.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/pdb.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/serviceaccount.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/servicemonitor.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/statefulset.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/svc-headless.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/data/vpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/extra-list.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/hpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/ingress.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/metrics-svc.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/networkpolicy.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/pdb.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/service.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/serviceaccount.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/servicemonitor.yaml create mode 100644 
addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/statefulset.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/svc-headless.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/vpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress-tls-secrets.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/initialization-configmap.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/hpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/metrics-svc.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/networkpolicy.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/pdb.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/serviceaccount.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/servicemonitor.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/statefulset.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/svc-headless.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/master/vpa.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/secrets.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/service.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/init-job.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/networkpolicy.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/policies-cm.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/pvc.yaml create mode 100644 
addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/repos-cm.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/templates/tls-secret.yaml create mode 100644 addons/opensearch/3.0/chart/opensearch-3.0/values.yaml create mode 100644 addons/opensearch/3.0/meta.yaml rename addons/opensearch/{2.10/plans/standard-1c2g16 => 3.0/plans/standard-2c4g32}/bind.yaml (100%) rename addons/opensearch/{2.10/plans/standard-1c2g16 => 3.0/plans/standard-2c4g32}/create-instance-schema.json (100%) create mode 100644 addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml rename addons/opensearch/{2.10/plans/standard-1c2g16 => 3.0/plans/standard-2c4g32}/values.yaml (97%) create mode 100644 addons/opensearch/3.0/plans/standard-2c4g64/bind.yaml create mode 100644 addons/opensearch/3.0/plans/standard-2c4g64/create-instance-schema.json create mode 100644 addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml create mode 100644 addons/opensearch/3.0/plans/standard-2c4g64/values.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c16g256/bind.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c16g256/create-instance-schema.json create mode 100644 addons/opensearch/3.0/plans/standard-4c16g256/meta.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c16g256/values.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c16g512/bind.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c16g512/create-instance-schema.json create mode 100644 addons/opensearch/3.0/plans/standard-4c16g512/meta.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c16g512/values.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c8g128/bind.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c8g128/create-instance-schema.json create mode 100644 addons/opensearch/3.0/plans/standard-4c8g128/meta.yaml create mode 100644 addons/opensearch/3.0/plans/standard-4c8g128/values.yaml create mode 100644 
addons/opensearch/3.0/plans/standard-8c32g1024/bind.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g1024/create-instance-schema.json create mode 100644 addons/opensearch/3.0/plans/standard-8c32g1024/meta.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g2048/bind.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g2048/create-instance-schema.json create mode 100644 addons/opensearch/3.0/plans/standard-8c32g2048/meta.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g768/bind.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g768/create-instance-schema.json create mode 100644 addons/opensearch/3.0/plans/standard-8c32g768/meta.yaml create mode 100644 addons/opensearch/3.0/plans/standard-8c32g768/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index a34e0b33..3c5fee53 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -34,6 +34,8 @@ entries: opensearch: - version: "2.10" description: "OpenSearch is a scalable, flexible, and extensible open-source software suite for search, analytics, and observability applications licensed under Apache 2.0." + - version: "3.0" + description: "OpenSearch is a scalable, flexible, and extensible open-source software suite for search, analytics, and observability applications licensed under Apache 2.0." prometheus: - version: 2 description: "Prometheus is an open-source systems monitoring and alerting toolkit originally built at SoundCloud." 
diff --git a/addons/opensearch/2.10/plans/standard-1c2g16/meta.yaml b/addons/opensearch/2.10/plans/standard-1c2g16/meta.yaml deleted file mode 100644 index 0fc7f941..00000000 --- a/addons/opensearch/2.10/plans/standard-1c2g16/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-1c2g16" -id: ba01d605-e665-475d-97b5-6072cfbf07db -description: "Opensearch standard-1c2g16 plan which limit resources 1 cores 2Gi memory and persistence size 16Gi." -displayName: "standard-1c2g16" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/Chart.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/Chart.yaml new file mode 100644 index 00000000..062c05ba --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/Chart.yaml @@ -0,0 +1,24 @@ +# Copyright Drycc Community. +# SPDX-License-Identifier: APACHE-2.0 + +annotations: + category: Analytics + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 3.0.0 +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.4 +description: OpenSearch is a scalable open-source solution for search, analytics, and observability. Features full-text queries, natural language processing, custom dictionaries, amongst others. +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/opensearch/img/opensearch-stack-220x234.png +keywords: + - opensearch +maintainers: + - name: Drycc Community. + url: https://github.com/bitnami/charts +name: opensearch +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/opensearch +version: 2.0.3 diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/README.md b/addons/opensearch/3.0/chart/opensearch-3.0/README.md new file mode 100644 index 00000000..bca7da67 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/README.md @@ -0,0 +1,1153 @@ + + +# OpenSearch packaged by Bitnami + +OpenSearch is a scalable open-source solution for search, analytics, and observability. 
Features full-text queries, natural language processing, custom dictionaries, amongst others. + +[Overview of OpenSearch](https://opensearch.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/opensearch +``` + +## Introduction + +This chart bootstraps a [OpenSearch](https://github.com/bitnami/containers/tree/main/bitnami/opensearch) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use OpenSearch in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/opensearch +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +These commands deploy OpenSearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
+ +> **Tip**: List all releases using `helm list` + +## Configuration and installation details + +### Resource requests and limits + +Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Update credentials + +Bitnami charts configure credentials at first boot. Any further change in the secrets or credentials require manual intervention. 
Follow these instructions: + +- Update the user password following [the upstream documentation](https://opster.com/guides/opensearch/opensearch-security/changing-admin-password-opensearch/) +- Update the password secret with the new values (replace the SECRET_NAME PASSWORD, DASHBOARDS_PASSWORD, LOGSTASH_PASSWORD placeholders) + +```shell +kubectl create secret generic SECRET_NAME --from-literal=opensearch-password=PASSWORD --from-literal=opensearch-dashboards-password=DASHBOARDS_PASSWORD --from-literal=logstash-password=LOGSTASH_PASSWORD --dry-run -o yaml | kubectl apply -f - +``` + +### Prometheus metrics + +This chart can be integrated with Prometheus by setting `*.metrics.enabled` (under the `data`, `ingest`, `master` and `coordinating` sections) to `true`. This will expose a Prometheus endpoint using the [Opensearch Prometheus plugin](https://github.com/Aiven-Open/prometheus-exporter-plugin-for-opensearch). The Opensearch service will be have the necessary annotations to be automatically scraped by Prometheus. + +#### Prometheus requirements + +It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster. + +#### Integration with Prometheus Operator + +The chart can deploy `ServiceMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `*.metrics.serviceMonitor.enabled=true` (under the `data`, `ingest`, `master` and `coordinating` sections). 
Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error: + +```text +no matches for kind "ServiceMonitor" in version "monitoring.coreos.com/v1" +``` + +Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) for having the necessary CRDs and the Prometheus Operator. + +### Securing traffic using TLS + +Opensearch can encrypt communications by setting `security.tls.enabled=true`. It is possible to configure the application to use `PEM` certificates or `JKS` keystores by setting `security.tls.usePEMCerts=true`. For the TLS secret management, the chart allows two configuration options: + +- Provide your own secrets using the `*.existingSecret` (under the `security.tls.admin`, `security.tls.master`, `security.tls.data`, `security.tls.ingest`, `security.tls.ingest`) value. Also set the correct name of the certificate files using the `certKey`, `keyKey` values. +- Have the chart auto-generate the certificates using `security.tls.autoGenerated=true`. + +### Change OpenSearch version + +To modify the OpenSearch version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/opensearch/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Default kernel settings + +Currently, OpenSearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the OS containers fail to boot with ERROR messages. More information about these requirements can be found here: + +- + +This chart uses a **privileged** initContainer to change those settings in the Kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`. +You can disable the initContainer using the `sysctlImage.enabled=false` parameter. 
+ +### Adding extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: OPENSEARCH_VERSION + value: 7.0 +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values. + +### Using custom init scripts + +For advanced operations, the Bitnami OpenSearch charts allows using custom init scripts that will be mounted inside `/docker-entrypoint.init-db`. You can include the file directly in your `values.yaml` with `initScripts`, or use a ConfigMap or a Secret (in case of sensitive data) for mounting these extra scripts. In this case you use the `initScriptsCM` and `initScriptsSecret` values. + +```console +initScriptsCM=special-scripts +initScriptsSecret=special-scripts-sensitive +``` + +### Snapshot and restore operations + +As it's described in the [official documentation](https://opensearch.org/docs/latest/tuning-your-cluster/availability-and-recovery/snapshots/snapshot-restore/#register-repository), it's necessary to register a snapshot repository before you can perform snapshot and restore operations. + +This chart allows you to configure snapshot repositories and snapshot policies in OpenSearch. +A minimal configuration example looks like this: + +```yaml +snapshots: + enabled: true + persistence: + enabled: true +snapshotRepoPath: "/snapshots" +``` + +For details, please refer to the `snapshots.*` documentation below. + +### Sidecars and Init Containers + +If you have a need for additional containers to run within the same pod as OpenSearch components (e.g. an additional metrics or logging exporter), you can do so via the `XXX.sidecars` parameter(s), where XXX is placeholder you need to replace with the actual component(s). Simply define your container according to the Kubernetes container spec. 
+ +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname +``` + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +### Backup and restore + +To back up and restore Helm chart deployments on Kubernetes, you need to back up the persistent volumes from the source deployment and attach them to a new deployment using [Velero](https://velero.io/), a Kubernetes backup/restore tool. Find the instructions for using Velero in [this guide](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-backup-restore-deployments-velero-index.html). + +## Persistence + +The [Bitnami OpenSearch](https://github.com/bitnami/containers/tree/main/bitnami/opensearch) image stores the OpenSearch data at the `/bitnami/opensearch/data` path of the container. + +By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC. 
+ +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | +| `global.security.allowInsecureImages` | Allows skipping image verification | `false` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `apiVersions` | Override Kubernetes API versions reported by .Capabilities | `[]` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `usePasswordFiles` | Mount credentials as files instead of using environment variables | `true` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + +### OpenSearch cluster Parameters + +| Name | Description | Value | +| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | +| `clusterName` | OpenSearch cluster name | `open` | +| `containerPorts.restAPI` | OpenSearch REST API port | `9200` | +| `containerPorts.transport` | OpenSearch Transport port | `9300` | +| `plugins` | Comma, semi-colon or space separated list of 
plugins to install at initialization | `""` | +| `snapshotRepoPath` | File System snapshot repository path | `""` | +| `config` | Override opensearch configuration | `{}` | +| `extraConfig` | Append extra configuration to the opensearch node configuration | `{}` | +| `extraHosts` | A list of external hosts which are part of this cluster | `[]` | +| `extraVolumes` | A list of volumes to be added to the pod | `[]` | +| `extraVolumeMounts` | A list of volume mounts to be added to the pod | `[]` | +| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` | +| `extraEnvVars` | Array containing extra env vars to be added to all pods (evaluated as a template) | `[]` | +| `extraEnvVarsCM` | ConfigMap containing extra env vars to be added to all pods (evaluated as a template) | `""` | +| `extraEnvVarsSecret` | Secret containing extra env vars to be added to all pods (evaluated as a template) | `""` | +| `sidecars` | Add additional sidecar containers to the all opensearch node pod(s) | `[]` | +| `initContainers` | Add additional init containers to the all opensearch node pod(s) | `[]` | +| `useIstioLabels` | Use this variable to add Istio labels to all pods | `true` | +| `image.registry` | OpenSearch image registry | `REGISTRY_NAME` | +| `image.repository` | OpenSearch image repository | `REPOSITORY_NAME/opensearch` | +| `image.digest` | OpenSearch image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | OpenSearch image pull policy | `IfNotPresent` | +| `image.pullSecrets` | OpenSearch image pull secrets | `[]` | +| `image.debug` | Enable OpenSearch image debug mode | `false` | +| `security.enabled` | Enable X-Pack Security settings | `false` | +| `security.adminPassword` | Password for 'admin' user | `""` | +| `security.logstashPassword` | Password for Logstash | `""` | +| `security.existingSecret` | Name of the existing secret containing the OpenSearch password and | `""` | +| `security.fipsMode` | Configure opensearch with FIPS 140 compliant mode | `false` | + +### OpenSearch admin parameters + +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------------------------- | --------------------------- | +| `security.tls.admin.existingSecret` | Existing secret containing the certificates for admin | `""` | +| `security.tls.admin.certKey` | Key containing the crt for admin certificate (defaults to admin.crt) | `""` | +| `security.tls.admin.keyKey` | Key containing the key for admin certificate (defaults to admin.key) | `""` | +| `security.tls.restEncryption` | Enable SSL/TLS encryption for OpenSearch REST API. | `false` | +| `security.tls.autoGenerated` | Create self-signed TLS certificates. | `true` | +| `security.tls.verificationMode` | Verification mode for SSL communications. 
| `full` | +| `security.tls.master.existingSecret` | Existing secret containing the certificates for the master nodes | `""` | +| `security.tls.master.certKey` | Key containing the crt for master nodes certificate (defaults to tls.crt) | `""` | +| `security.tls.master.keyKey` | Key containing the key for master nodes certificate (defaults to tls.key) | `""` | +| `security.tls.master.caKey` | Key containing the ca for master nodes certificate (defaults to ca.crt) | `""` | +| `security.tls.data.existingSecret` | Existing secret containing the certificates for the data nodes | `""` | +| `security.tls.data.certKey` | Key containing the crt for data nodes certificate (defaults to tls.crt) | `""` | +| `security.tls.data.keyKey` | Key containing the key for data nodes certificate (defaults to tls.key) | `""` | +| `security.tls.data.caKey` | Key containing the ca for data nodes certificate (defaults to ca.crt) | `""` | +| `security.tls.ingest.existingSecret` | Existing secret containing the certificates for the ingest nodes | `""` | +| `security.tls.ingest.certKey` | Key containing the crt for ingest nodes certificate (defaults to tls.crt) | `""` | +| `security.tls.ingest.keyKey` | Key containing the key for ingest nodes certificate (defaults to tls.key) | `""` | +| `security.tls.ingest.caKey` | Key containing the ca for ingest nodes certificate (defaults to ca.crt) | `""` | +| `security.tls.coordinating.existingSecret` | Existing secret containing the certificates for the coordinating nodes | `""` | +| `security.tls.coordinating.certKey` | Key containing the crt for coordinating nodes certificate (defaults to tls.crt) | `""` | +| `security.tls.coordinating.keyKey` | Key containing the key for coordinating nodes certificate (defaults to tls.key) | `""` | +| `security.tls.coordinating.caKey` | Key containing the ca for coordinating nodes certificate (defaults to ca.crt) | `""` | +| `security.tls.keystoreFilename` | Name of the keystore file | `opensearch.keystore.jks` | +| 
`security.tls.truststoreFilename`          | Name of the truststore                                                                                  | `opensearch.truststore.jks` |
+| `security.tls.usePemCerts`                 | Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12                        | `false`                     |
+| `security.tls.passwordsSecret`             | Existing secret containing the Keystore and Truststore passwords, or key password if PEM certs are used | `""`                        |
+| `security.tls.keystorePassword`            | Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected.                 | `""`                        |
+| `security.tls.truststorePassword`          | Password to access the JKS/PKCS12 truststore when they are password-protected.                          | `""`                        |
+| `security.tls.keyPassword`                 | Password to access the PEM key when they are password-protected.                                        | `""`                        |
+| `security.tls.secretKeystoreKey`           | Name of the secret key containing the Keystore password                                                 | `""`                        |
+| `security.tls.secretTruststoreKey`         | Name of the secret key containing the Truststore password                                               | `""`                        |
+| `security.tls.secretKey`                   | Name of the secret key containing the PEM key password                                                  | `""`                        |
+| `security.tls.nodesDN`                     | A comma separated list of DN for nodes                                                                  | `""`                        |
+| `security.tls.adminDN`                     | A comma separated list of DN for admins                                                                 | `""`                        |
+
+### Traffic Exposure Parameters
+
+| Name                               | Description                                                                                                                      | Value                    |
+| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
+| `service.type`                     | OpenSearch service type                                                                                                          | `ClusterIP`              |
+| `service.ports.restAPI`            | OpenSearch service REST API port                                                                                                 | `9200`                   |
+| `service.ports.transport`          | OpenSearch service transport port                                                                                                | `9300`                   |
+| `service.nodePorts.restAPI`        | Node port for REST API                                                                                                           | `""`                     |
+| `service.nodePorts.transport`      | Node port for transport                                                                                                          | `""`                     |
+| `service.clusterIP`                | OpenSearch service Cluster IP                                                                                                    | `""`                     |
+| `service.loadBalancerIP`           | OpenSearch service Load Balancer IP                                                                                              | `""`                     |
+| `service.loadBalancerSourceRanges` | OpenSearch service Load 
Balancer sources | `[]` | +| `service.externalTrafficPolicy` | OpenSearch service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for OpenSearch service | `{}` | +| `service.extraPorts` | Extra ports to expose in OpenSearch service (normally used with the `sidecars` value) | `[]` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingress.enabled` | Enable ingress record generation for OpenSearch | `false` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.hostname` | Default host for the ingress record | `opensearch.local` | +| `ingress.path` | Default path for the ingress record | `/` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | +| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | + +### Master-eligible nodes parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `master.masterOnly` | Deploy the OpenSearch master-eligible nodes as master-only nodes. Recommended for high-demand deployments. | `true` | +| `master.replicaCount` | Number of master-eligible replicas to deploy | `2` | +| `master.extraRoles` | Append extra roles to the node role | `[]` | +| `master.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `master.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `master.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `master.pdb.minAvailable` and `master.pdb.maxUnavailable` are empty. 
| `""` | +| `master.nameOverride` | String to partially override opensearch.master.fullname | `""` | +| `master.fullnameOverride` | String to fully override opensearch.master.fullname | `""` | +| `master.servicenameOverride` | String to fully override opensearch.master.servicename | `""` | +| `master.annotations` | Annotations for the master statefulset | `{}` | +| `master.updateStrategy.type` | Master-eligible nodes statefulset strategy type | `RollingUpdate` | +| `master.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). | `medium` | +| `master.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `master.heapSize` | OpenSearch master-eligible node heap size. | `512m` | +| `master.podSecurityContext.enabled` | Enabled master-eligible pods' Security Context | `true` | +| `master.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `master.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `master.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `master.podSecurityContext.fsGroup` | Set master-eligible pod's Security Context fsGroup | `1001` | +| `master.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `master.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `master.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `master.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `master.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| 
`master.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `master.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `master.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `master.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `master.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `master.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `master.hostAliases` | master-eligible pods host aliases | `[]` | +| `master.podLabels` | Extra labels for master-eligible pods | `{}` | +| `master.podAnnotations` | Annotations for master-eligible pods | `{}` | +| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` | +| `master.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `master.affinity` is set | `[]` | +| `master.affinity` | Affinity for master-eligible pods assignment | `{}` | +| `master.nodeSelector` | Node labels for master-eligible pods assignment | `{}` | +| `master.tolerations` | Tolerations for master-eligible pods assignment | `[]` | +| `master.priorityClassName` | master-eligible pods' priorityClassName | `""` | +| `master.schedulerName` | Name of the k8s scheduler (other than default) for master-eligible pods | `""` | +| `master.terminationGracePeriodSeconds` | In seconds, time the given to the OpenSearch Master pod needs to terminate gracefully | `""` | +| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `master.podManagementPolicy` | podManagementPolicy to manage scaling operation of OpenSearch master pods | `Parallel` | +| `master.startupProbe.enabled` | Enable/disable the startup probe (master nodes pod) | `false` | +| `master.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (master nodes pod) | `90` | +| `master.startupProbe.periodSeconds` | How often to perform the probe (master nodes pod) | `10` | +| `master.startupProbe.timeoutSeconds` | When the probe times out (master nodes pod) | `5` | +| `master.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) | `1` | +| `master.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `180` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| 
`master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.customStartupProbe` | Override default startup probe | `{}` | +| `master.customLivenessProbe` | Override default liveness probe | `{}` | +| `master.customReadinessProbe` | Override default readiness probe | `{}` | +| `master.command` | Override default container command (useful when using custom images) | `[]` | +| `master.args` | Override default container args (useful when using custom images) | `[]` | +| `master.lifecycleHooks` | for the master-eligible container(s) to automate configuration before or after startup | `{}` | +| `master.extraEnvVars` | Array with extra environment variables to add to master-eligible nodes | `[]` | +| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for master-eligible nodes | `""` | +| `master.extraEnvVarsSecret` | Name of 
existing Secret containing extra env vars for master-eligible nodes | `""` | +| `master.extraVolumes` | Optionally specify extra list of additional volumes for the master-eligible pod(s) | `[]` | +| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the master-eligible container(s) | `[]` | +| `master.sidecars` | Add additional sidecar containers to the master-eligible pod(s) | `[]` | +| `master.initContainers` | Add additional init containers to the master-eligible pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `master.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `master.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `master.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. | `""` | +| `master.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume` | `{}` | +| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume Size | `8Gi` | +| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `master.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` | +| `master.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | +| `master.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
| `{}`               |
+| `master.networkPolicy.enabled`                             | Enable creation of NetworkPolicy resources                                                                                                                             | `true`              |
+| `master.networkPolicy.allowExternal`                       | The Policy model to apply                                                                                                                                              | `true`              |
+| `master.networkPolicy.allowExternalEgress`                 | Allow the pod to access any range of port and all destinations.                                                                                                        | `true`              |
+| `master.networkPolicy.extraIngress`                        | Add extra ingress rules to the NetworkPolicy                                                                                                                           | `[]`                |
+| `master.networkPolicy.extraEgress`                         | Add extra egress rules to the NetworkPolicy                                                                                                                            | `[]`                |
+| `master.networkPolicy.ingressNSMatchLabels`                | Labels to match to allow traffic from other namespaces                                                                                                                 | `{}`                |
+| `master.networkPolicy.ingressNSPodMatchLabels`             | Pod labels to match to allow traffic from other namespaces                                                                                                             | `{}`                |
+| `master.autoscaling.vpa.enabled`                           | Enable VPA                                                                                                                                                             | `false`             |
+| `master.autoscaling.vpa.annotations`                       | Annotations for VPA resource                                                                                                                                           | `{}`                |
+| `master.autoscaling.vpa.controlledResources`               | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory                                                                         | `[]`                |
+| `master.autoscaling.vpa.maxAllowed`                        | VPA Max allowed resources for the pod                                                                                                                                  | `{}`                |
+| `master.autoscaling.vpa.minAllowed`                        | VPA Min allowed resources for the pod                                                                                                                                  | `{}`                |
+| `master.autoscaling.vpa.updatePolicy.updateMode`           | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto`              |
+| `master.autoscaling.hpa.enabled`                           | Enable HPA for OpenSearch master nodes                                                                                                                                 | `false`             |
+| `master.autoscaling.hpa.minReplicas`                       | Minimum number of OpenSearch master replicas                                                                                                                           | `3`                 |
+| `master.autoscaling.hpa.maxReplicas`                       | Maximum number of OpenSearch master replicas                                                                                                                           | `11`                |
+| `master.autoscaling.hpa.targetCPU`                         | Target CPU utilization percentage                                                                                                                                      | `""`                |
+| `master.autoscaling.hpa.targetMemory`                      | Target Memory utilization percentage                                                                                                                                  | `""`                |
+| `master.service.headless.annotations`                      | Annotations for the Master-eligible headless service. 
| `{}` | +| `master.service.headless.nameOverride` | String to fully override opensearch.master.servicename | `""` | +| `master.service.headless.trafficDistribution` | String Traffic distribution for the master headless service | `PreferClose` | +| `master.metrics.enabled` | Enable master-eligible node metrics | `false` | +| `master.metrics.service.ports.metrics` | master-eligible node metrics service port | `80` | +| `master.metrics.service.clusterIP` | master-eligible node metrics service Cluster IP | `""` | +| `master.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `master.metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `master.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `master.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `master.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `10s` | +| `master.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `master.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `master.metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `master.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `master.metrics.rules.enabled` | Enable render extra rules for PrometheusRule object | `false` | +| `master.metrics.rules.spec` | Rules to render into the PrometheusRule object | `[]` | +| `master.metrics.rules.selector` | Selector for the PrometheusRule object | `{}` | +| `master.metrics.rules.namespace` | Namespace where to create the PrometheusRule object | `monitoring` | +| `master.metrics.rules.additionalLabels` | Additional lables to add to the 
PrometheusRule object | `{}` | + +### Data-only nodes parameters + +| Name | Description | Value | +| -------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `data.replicaCount` | Number of data-only replicas to deploy | `2` | +| `data.extraRoles` | Append extra roles to the node role | `[]` | +| `data.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `data.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `data.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `data.pdb.minAvailable` and `data.pdb.maxUnavailable` are empty. | `""` | +| `data.nameOverride` | String to partially override opensearch.data.fullname | `""` | +| `data.fullnameOverride` | String to fully override opensearch.data.fullname | `""` | +| `data.servicenameOverride` | String to fully override opensearch.data.servicename | `""` | +| `data.annotations` | Annotations for the data statefulset | `{}` | +| `data.updateStrategy.type` | Data-only nodes statefulset strategy type | `RollingUpdate` | +| `data.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if data.resources is set (data.resources is recommended for production). | `medium` | +| `data.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `data.heapSize` | OpenSearch data node heap size. 
| `1024m` | +| `data.podSecurityContext.enabled` | Enabled data pods' Security Context | `true` | +| `data.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `data.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `data.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `data.podSecurityContext.fsGroup` | Set data pod's Security Context fsGroup | `1001` | +| `data.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `data.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `data.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `data.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `data.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `data.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `data.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `data.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `data.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `data.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `data.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `data.hostAliases` | data pods host aliases | `[]` | +| `data.podLabels` | Extra labels for data pods | `{}` | +| `data.podAnnotations` | Annotations for data pods | `{}` | +| `data.podAffinityPreset` | Pod affinity preset. Ignored if `data.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `data.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.key` | Node label key to match. Ignored if `data.affinity` is set | `""` | +| `data.nodeAffinityPreset.values` | Node label values to match. Ignored if `data.affinity` is set | `[]` | +| `data.affinity` | Affinity for data pods assignment | `{}` | +| `data.nodeSelector` | Node labels for data pods assignment | `{}` | +| `data.tolerations` | Tolerations for data pods assignment | `[]` | +| `data.priorityClassName` | data pods' priorityClassName | `""` | +| `data.schedulerName` | Name of the k8s scheduler (other than default) for data pods | `""` | +| `data.terminationGracePeriodSeconds` | In seconds, time the given to the OpenSearch data pod needs to terminate gracefully | `""` | +| `data.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `data.podManagementPolicy` | podManagementPolicy to manage scaling operation of OpenSearch data pods | `Parallel` | +| `data.startupProbe.enabled` | Enable/disable the startup probe (data nodes pod) | `false` | +| `data.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (data nodes pod) | `90` | +| `data.startupProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.startupProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` | +| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `180` | +| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` | +| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` | +| `data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.readinessProbe.successThreshold` | Minimum 
consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.customStartupProbe` | Override default startup probe | `{}` | +| `data.customLivenessProbe` | Override default liveness probe | `{}` | +| `data.customReadinessProbe` | Override default readiness probe | `{}` | +| `data.command` | Override default container command (useful when using custom images) | `[]` | +| `data.args` | Override default container args (useful when using custom images) | `[]` | +| `data.lifecycleHooks` | for the data container(s) to automate configuration before or after startup | `{}` | +| `data.extraEnvVars` | Array with extra environment variables to add to data nodes | `[]` | +| `data.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data nodes | `""` | +| `data.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data nodes | `""` | +| `data.extraVolumes` | Optionally specify extra list of additional volumes for the data pod(s) | `[]` | +| `data.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the data container(s) | `[]` | +| `data.sidecars` | Add additional sidecar containers to the data pod(s) | `[]` | +| `data.initContainers` | Add additional init containers to the data pod(s) | `[]` | +| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `data.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `data.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `data.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set. 
| `""` | +| `data.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` | `{}` | +| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `data.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `data.persistence.size` | Persistent Volume Size | `8Gi` | +| `data.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `data.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` | +| `data.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | +| `data.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `data.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` | +| `data.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `data.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `data.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `data.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `data.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `data.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `data.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `data.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `data.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `data.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `data.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `data.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `data.autoscaling.hpa.enabled` | Enable HPA for OpenSearch data nodes | `false` | +| `data.autoscaling.hpa.minReplicas` | Minimum number of OpenSearch data node replicas | `3` | +| `data.autoscaling.hpa.maxReplicas` | Maximum number of OpenSearch data node replicas | `11` | +| `data.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `data.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `data.service.headless.annotations` | Annotations for the data headless service. | `{}` | +| `data.service.headless.nameOverride` | String to fully override opensearch.data.servicename | `""` | +| `data.service.headless.trafficDistribution` | String Traffic distribution for the data headless service | `PreferClose` | +| `data.metrics.enabled` | Enable data node metrics | `false` | +| `data.metrics.service.ports.metrics` | data node metrics service port | `80` | +| `data.metrics.service.clusterIP` | data node metrics service Cluster IP | `""` | +| `data.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `data.metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `data.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `data.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `data.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `10s` | +| `data.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `data.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `data.metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `data.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `data.metrics.rules.enabled` | Enable render extra rules for PrometheusRule object | `false` | +| `data.metrics.rules.spec` | Rules to render into the PrometheusRule object | `[]` | +| `data.metrics.rules.selector` | Selector for the PrometheusRule object | `{}` | +| `data.metrics.rules.namespace` | Namespace where to create the PrometheusRule object | `monitoring` | +| `data.metrics.rules.additionalLabels` | Additional lables to add to the PrometheusRule object | `{}` | + +### Coordinating-only nodes parameters + +| Name | Description | Value | +| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `coordinating.replicaCount` | Number of coordinating-only replicas to deploy | `2` | +| `coordinating.extraRoles` | Append extra roles to the node role | `[]` | +| `coordinating.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `coordinating.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `coordinating.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. 
Defaults to `1` if both `coordinating.pdb.minAvailable` and `coordinating.pdb.maxUnavailable` are empty. | `""` | +| `coordinating.nameOverride` | String to partially override opensearch.coordinating.fullname | `""` | +| `coordinating.fullnameOverride` | String to fully override opensearch.coordinating.fullname | `""` | +| `coordinating.servicenameOverride` | String to fully override opensearch.coordinating.servicename | `""` | +| `coordinating.annotations` | Annotations for the coordinating-only statefulset | `{}` | +| `coordinating.updateStrategy.type` | Coordinating-only nodes statefulset strategy type | `RollingUpdate` | +| `coordinating.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if coordinating.resources is set (coordinating.resources is recommended for production). | `medium` | +| `coordinating.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `coordinating.heapSize` | OpenSearch coordinating node heap size. 
| `512m` | +| `coordinating.podSecurityContext.enabled` | Enabled coordinating-only pods' Security Context | `true` | +| `coordinating.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `coordinating.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `coordinating.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `coordinating.podSecurityContext.fsGroup` | Set coordinating-only pod's Security Context fsGroup | `1001` | +| `coordinating.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `coordinating.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `coordinating.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `coordinating.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `coordinating.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `coordinating.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `coordinating.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `coordinating.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `coordinating.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `coordinating.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `coordinating.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `coordinating.hostAliases` | coordinating-only pods host aliases | `[]` | +| `coordinating.podLabels` | Extra labels for coordinating-only pods | `{}` | +| `coordinating.podAnnotations` | 
Annotations for coordinating-only pods | `{}` | +| `coordinating.podAffinityPreset` | Pod affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.key` | Node label key to match. Ignored if `coordinating.affinity` is set | `""` | +| `coordinating.nodeAffinityPreset.values` | Node label values to match. Ignored if `coordinating.affinity` is set | `[]` | +| `coordinating.affinity` | Affinity for coordinating-only pods assignment | `{}` | +| `coordinating.nodeSelector` | Node labels for coordinating-only pods assignment | `{}` | +| `coordinating.tolerations` | Tolerations for coordinating-only pods assignment | `[]` | +| `coordinating.priorityClassName` | coordinating-only pods' priorityClassName | `""` | +| `coordinating.schedulerName` | Name of the k8s scheduler (other than default) for coordinating-only pods | `""` | +| `coordinating.terminationGracePeriodSeconds` | In seconds, time the given to the OpenSearch coordinating pod needs to terminate gracefully | `""` | +| `coordinating.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `coordinating.podManagementPolicy` | podManagementPolicy to manage scaling operation of OpenSearch coordinating pods | `Parallel` | +| `coordinating.startupProbe.enabled` | Enable/disable the startup probe (coordinating-only nodes pod) | `false` | +| `coordinating.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.startupProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.startupProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe (coordinating-only nodes pod) | `true` | +| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `180` | +| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` | +| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated 
(coordinating-only nodes pod) | `90` | +| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.customStartupProbe` | Override default startup probe | `{}` | +| `coordinating.customLivenessProbe` | Override default liveness probe | `{}` | +| `coordinating.customReadinessProbe` | Override default readiness probe | `{}` | +| `coordinating.command` | Override default container command (useful when using custom images) | `[]` | +| `coordinating.args` | Override default container args (useful when using custom images) | `[]` | +| `coordinating.lifecycleHooks` | for the coordinating-only container(s) to automate configuration before or after startup | `{}` | +| `coordinating.extraEnvVars` | Array with extra environment variables to add to coordinating-only nodes | `[]` | +| `coordinating.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for coordinating-only nodes | `""` | +| `coordinating.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for coordinating-only nodes | `""` | +| `coordinating.extraVolumes` | Optionally specify extra list of additional volumes for the coordinating-only pod(s) | `[]` | +| `coordinating.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the coordinating-only container(s) | `[]` | +| `coordinating.sidecars` | Add additional sidecar containers to the coordinating-only pod(s) | `[]` | +| `coordinating.initContainers` | Add additional init containers to the 
coordinating-only pod(s) | `[]` | +| `coordinating.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `coordinating.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` | +| `coordinating.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | +| `coordinating.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `coordinating.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` | +| `coordinating.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `coordinating.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `coordinating.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `coordinating.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `coordinating.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `coordinating.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `coordinating.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `coordinating.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `coordinating.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `coordinating.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `coordinating.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `coordinating.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `coordinating.autoscaling.hpa.enabled` | Enable HPA for APISIX Data Plane | `false` | +| `coordinating.autoscaling.hpa.minReplicas` | Minimum number of APISIX Data Plane replicas | `3` | +| `coordinating.autoscaling.hpa.maxReplicas` | Maximum number of APISIX Data Plane replicas | `11` | +| `coordinating.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `coordinating.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `coordinating.service.headless.annotations` | Annotations for the coordinating-only headless service. | `{}` | +| `coordinating.service.headless.nameOverride` | String to fully override opensearch.coordinating.servicename | `""` | +| `coordinating.service.headless.trafficDistribution` | String Traffic distribution for the coordinating headless service | `PreferClose` | +| `coordinating.metrics.enabled` | Enable coordinating node metrics | `false` | +| `coordinating.metrics.service.ports.metrics` | coordinating node metrics service port | `80` | +| `coordinating.metrics.service.clusterIP` | coordinating node metrics service Cluster IP | `""` | +| `coordinating.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `coordinating.metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `coordinating.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `coordinating.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `coordinating.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `10s` | +| `coordinating.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `coordinating.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `coordinating.metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `coordinating.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `coordinating.metrics.rules.enabled` | Enable render extra rules for PrometheusRule object | `false` | +| `coordinating.metrics.rules.spec` | Rules to render into the PrometheusRule object | `[]` | +| `coordinating.metrics.rules.selector` | Selector for the PrometheusRule object | `{}` | +| `coordinating.metrics.rules.namespace` | Namespace where to create the PrometheusRule object | `monitoring` | +| `coordinating.metrics.rules.additionalLabels` | Additional lables to add to the PrometheusRule object | `{}` | + +### Ingest-only nodes parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `ingest.enabled` | Enable ingest nodes | `true` | +| `ingest.replicaCount` | Number of ingest-only replicas to deploy | `2` | +| `ingest.extraRoles` | Append extra roles to the node role | `[]` | +| `ingest.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `ingest.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| 
`ingest.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `ingest.pdb.minAvailable` and `ingest.pdb.maxUnavailable` are empty. | `""` | +| `ingest.nameOverride` | String to partially override opensearch.ingest.fullname | `""` | +| `ingest.fullnameOverride` | String to fully override opensearch.ingest.fullname | `""` | +| `ingest.servicenameOverride` | String to fully override opensearch.ingest.servicename | `""` | +| `ingest.annotations` | Annotations for the ingest statefulset | `{}` | +| `ingest.updateStrategy.type` | Ingest-only nodes statefulset strategy type | `RollingUpdate` | +| `ingest.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if ingest.resources is set (ingest.resources is recommended for production). | `medium` | +| `ingest.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `ingest.heapSize` | OpenSearch ingest-only node heap size. 
| `512m` | +| `ingest.podSecurityContext.enabled` | Enabled ingest-only pods' Security Context | `true` | +| `ingest.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `ingest.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `ingest.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `ingest.podSecurityContext.fsGroup` | Set ingest-only pod's Security Context fsGroup | `1001` | +| `ingest.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `ingest.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `ingest.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `ingest.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `ingest.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `ingest.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `ingest.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `ingest.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `ingest.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `ingest.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `ingest.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `ingest.hostAliases` | ingest-only pods host aliases | `[]` | +| `ingest.podLabels` | Extra labels for ingest-only pods | `{}` | +| `ingest.podAnnotations` | Annotations for ingest-only pods | `{}` | +| `ingest.podAffinityPreset` | Pod affinity preset. Ignored if `ingest.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `ingest.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.key` | Node label key to match. Ignored if `ingest.affinity` is set | `""` | +| `ingest.nodeAffinityPreset.values` | Node label values to match. Ignored if `ingest.affinity` is set | `[]` | +| `ingest.affinity` | Affinity for ingest-only pods assignment | `{}` | +| `ingest.nodeSelector` | Node labels for ingest-only pods assignment | `{}` | +| `ingest.tolerations` | Tolerations for ingest-only pods assignment | `[]` | +| `ingest.priorityClassName` | ingest-only pods' priorityClassName | `""` | +| `ingest.schedulerName` | Name of the k8s scheduler (other than default) for ingest-only pods | `""` | +| `ingest.terminationGracePeriodSeconds` | In seconds, time the given to the OpenSearch ingest pod needs to terminate gracefully | `""` | +| `ingest.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `ingest.podManagementPolicy` | podManagementPolicy to manage scaling operation of OpenSearch ingest pods | `Parallel` | +| `ingest.startupProbe.enabled` | Enable/disable the startup probe (ingest-only nodes pod) | `false` | +| `ingest.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (ingest-only nodes pod) | `90` | +| `ingest.startupProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` | +| `ingest.startupProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` | +| `ingest.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` | +| `ingest.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest-only nodes pod) | `true` | +| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest-only nodes pod) | `180` | +| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` | +| `ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` | +| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` | +| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest-only nodes pod) | `true` | +| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest-only nodes pod) | `90` | +| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` | +| 
`ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` | +| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` | +| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.customStartupProbe` | Override default startup probe | `{}` | +| `ingest.customLivenessProbe` | Override default liveness probe | `{}` | +| `ingest.customReadinessProbe` | Override default readiness probe | `{}` | +| `ingest.command` | Override default container command (useful when using custom images) | `[]` | +| `ingest.args` | Override default container args (useful when using custom images) | `[]` | +| `ingest.lifecycleHooks` | for the ingest-only container(s) to automate configuration before or after startup | `{}` | +| `ingest.extraEnvVars` | Array with extra environment variables to add to ingest-only nodes | `[]` | +| `ingest.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ingest-only nodes | `""` | +| `ingest.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ingest-only nodes | `""` | +| `ingest.extraVolumes` | Optionally specify extra list of additional volumes for the ingest-only pod(s) | `[]` | +| `ingest.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ingest-only container(s) | `[]` | +| `ingest.sidecars` | Add additional sidecar containers to the ingest-only pod(s) | `[]` | +| `ingest.initContainers` | Add additional init containers to the ingest-only pod(s) | `[]` | +| `ingest.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `ingest.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. 
| `""` | +| `ingest.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | +| `ingest.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `ingest.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` | +| `ingest.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `ingest.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `ingest.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `ingest.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `ingest.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `ingest.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `ingest.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `ingest.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `ingest.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `ingest.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `ingest.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `ingest.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `ingest.autoscaling.hpa.enabled` | Enable HPA for OpenSearch ingest nodes | `false` | +| `ingest.autoscaling.hpa.minReplicas` | Minimum number of OpenSearch ingest node replicas | `3` | +| `ingest.autoscaling.hpa.maxReplicas` | Maximum number of OpenSearch ingest node replicas | `11` | +| `ingest.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `ingest.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `ingest.service.enabled` | Enable Ingest-only service | `false` | +| `ingest.service.type` | OpenSearch ingest-only service type | `ClusterIP` | +| `ingest.service.ports.restAPI` | OpenSearch service REST API port | `9200` | +| `ingest.service.ports.transport` | OpenSearch service transport port | `9300` | +| `ingest.service.nodePorts.restAPI` | Node port for REST API | `""` | +| `ingest.service.nodePorts.transport` | Node port for transport | `""` | +| `ingest.service.clusterIP` | OpenSearch ingest-only service Cluster IP | `""` | +| `ingest.service.loadBalancerIP` | OpenSearch ingest-only service Load Balancer IP | `""` | +| `ingest.service.loadBalancerSourceRanges` | OpenSearch ingest-only service Load Balancer sources | `[]` | +| `ingest.service.externalTrafficPolicy` | OpenSearch ingest-only service external traffic policy | `Cluster` | +| `ingest.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `ingest.service.annotations` | Additional custom annotations for OpenSearch ingest-only service | `{}` | +| `ingest.service.sessionAffinity` | 
Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `ingest.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingest.service.headless.annotations` | Annotations for the ingest headless service. | `{}` | +| `ingest.service.headless.nameOverride` | String to fully override opensearch.ingest.servicename | `""` | +| `ingest.service.headless.trafficDistribution` | String Traffic distribution for the ingest headless service | `PreferClose` | +| `ingest.ingress.enabled` | Enable ingress record generation for OpenSearch | `false` | +| `ingest.ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingest.ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingest.ingress.hostname` | Default host for the ingress record | `opensearch-ingest.local` | +| `ingest.ingress.path` | Default path for the ingress record | `/` | +| `ingest.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | +| `ingest.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingest.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingest.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingest.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingest.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingest.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `ingest.ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingest.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `ingest.metrics.enabled` | Enable ingest node metrics | `false` | +| `ingest.metrics.service.ports.metrics` | ingest node metrics service port | `80` | +| `ingest.metrics.service.clusterIP` | ingest node metrics service Cluster IP | `""` | +| `ingest.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `ingest.metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `ingest.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `ingest.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `ingest.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `10s` | +| `ingest.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `ingest.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `ingest.metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `ingest.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `ingest.metrics.rules.enabled` | Enable render extra rules for PrometheusRule object | `false` | +| `ingest.metrics.rules.spec` | Rules to render into the PrometheusRule object | `[]` | +| `ingest.metrics.rules.selector` | Selector for the PrometheusRule object | `{}` | +| `ingest.metrics.rules.namespace` | Namespace where to create the PrometheusRule object | `monitoring` | +| `ingest.metrics.rules.additionalLabels` | Additional lables to add to the PrometheusRule object | `{}` | + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` 
| Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `nano` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `sysctlImage.enabled` | Enable kernel settings modifier image | `true` | +| `sysctlImage.registry` | Kernel settings modifier image registry | `REGISTRY_NAME` | +| `sysctlImage.repository` | Kernel settings modifier image repository | `REPOSITORY_NAME/os-shell` | +| `sysctlImage.digest` | Kernel settings modifier image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `IfNotPresent` | +| `sysctlImage.pullSecrets` | Kernel settings modifier image pull secrets | `[]` | +| `sysctlImage.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if sysctlImage.resources is set (sysctlImage.resources is recommended for production). 
| `nano` | +| `sysctlImage.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | + +### OpenSearch Dashboards Parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------- | +| `dashboards.enabled` | Enables OpenSearch Dashboards deployment | `false` | +| `dashboards.image.registry` | OpenSearch Dashboards image registry | `REGISTRY_NAME` | +| `dashboards.image.repository` | OpenSearch Dashboards image repository | `REPOSITORY_NAME/opensearch-dashboards` | +| `dashboards.image.digest` | OpenSearch Dashboards image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `dashboards.image.pullPolicy` | OpenSearch Dashboards image pull policy | `IfNotPresent` | +| `dashboards.image.pullSecrets` | OpenSearch Dashboards image pull secrets | `[]` | +| `dashboards.image.debug` | Enable OpenSearch Dashboards image debug mode | `false` | +| `dashboards.service.type` | OpenSearch Dashboards service type | `ClusterIP` | +| `dashboards.service.ports.http` | OpenSearch Dashboards service web UI port | `5601` | +| `dashboards.service.nodePorts.http` | Node port for web UI | `""` | +| `dashboards.service.clusterIP` | OpenSearch Dashboards service Cluster IP | `""` | +| `dashboards.service.loadBalancerIP` | OpenSearch Dashboards service Load Balancer IP | `""` | +| `dashboards.service.loadBalancerSourceRanges` | OpenSearch Dashboards service Load Balancer sources | `[]` | +| `dashboards.service.externalTrafficPolicy` | OpenSearch Dashboards service external traffic policy | `Cluster` | +| `dashboards.service.annotations` | Additional 
custom annotations for OpenSearch Dashboards service | `{}` | +| `dashboards.service.extraPorts` | Extra ports to expose in OpenSearch Dashboards service (normally used with the `sidecars` value) | `[]` | +| `dashboards.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `dashboards.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `dashboards.service.nameOverride` | String to fully override opensearch.dashboards.servicename | `""` | +| `dashboards.ingress.enabled` | Enable ingress record generation for OpenSearch Dashboards | `false` | +| `dashboards.ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `dashboards.ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `dashboards.ingress.hostname` | Default host for the ingress record | `opensearch-dashboards.local` | +| `dashboards.ingress.path` | Default path for the ingress record | `/` | +| `dashboards.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | +| `dashboards.ingress.tls` | Enable TLS configuration for the host defined at `dashboards.ingress.hostname` parameter | `false` | +| `dashboards.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `dashboards.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `dashboards.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `dashboards.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `dashboards.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `dashboards.ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `dashboards.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `dashboards.containerPorts.http` | OpenSearch Dashboards HTTP port | `5601` | +| `dashboards.password` | Password for OpenSearch Dashboards | `""` | +| `dashboards.replicaCount` | Number of data-only replicas to deploy | `1` | +| `dashboards.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `dashboards.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `dashboards.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `dashboards.pdb.minAvailable` and `dashboards.pdb.maxUnavailable` are empty. 
| `""` | +| `dashboards.nameOverride` | String to partially override opensearch.dashboards.fullname | `""` | +| `dashboards.fullnameOverride` | String to fully override opensearch.dashboards.fullname | `""` | +| `dashboards.servicenameOverride` | String to fully override opensearch.dashboards.servicename | `""` | +| `dashboards.updateStrategy.type` | Data-only nodes statefulset strategy type | `RollingUpdate` | +| `dashboards.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if dashboards.resources is set (dashboards.resources is recommended for production). | `small` | +| `dashboards.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `dashboards.heapSize` | OpenSearch data node heap size. | `1024m` | +| `dashboards.podSecurityContext.enabled` | Enabled data pods' Security Context | `true` | +| `dashboards.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `dashboards.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `dashboards.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `dashboards.podSecurityContext.fsGroup` | Set dashboards pod's Security Context fsGroup | `1001` | +| `dashboards.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `dashboards.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `dashboards.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `dashboards.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `dashboards.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `dashboards.containerSecurityContext.privileged` | Set 
container's Security Context privileged | `false` | +| `dashboards.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `dashboards.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `dashboards.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `dashboards.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `dashboards.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `dashboards.hostAliases` | data pods host aliases | `[]` | +| `dashboards.podLabels` | Extra labels for data pods | `{}` | +| `dashboards.podAnnotations` | Annotations for data pods | `{}` | +| `dashboards.podAffinityPreset` | Pod affinity preset. Ignored if `dashboards.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dashboards.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `dashboards.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dashboards.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `dashboards.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dashboards.nodeAffinityPreset.key` | Node label key to match. Ignored if `dashboards.affinity` is set | `""` | +| `dashboards.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `dashboards.affinity` is set | `[]` | +| `dashboards.affinity` | Affinity for data pods assignment | `{}` | +| `dashboards.nodeSelector` | Node labels for data pods assignment | `{}` | +| `dashboards.tolerations` | Tolerations for data pods assignment | `[]` | +| `dashboards.priorityClassName` | data pods' priorityClassName | `""` | +| `dashboards.schedulerName` | Name of the k8s scheduler (other than default) for data pods | `""` | +| `dashboards.terminationGracePeriodSeconds` | In seconds, time the given to the OpenSearch data pod needs to terminate gracefully | `""` | +| `dashboards.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `dashboards.startupProbe.enabled` | Enable/disable the startup probe (data nodes pod) | `false` | +| `dashboards.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (data nodes pod) | `120` | +| `dashboards.startupProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `dashboards.startupProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `dashboards.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `dashboards.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `dashboards.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` | +| `dashboards.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `180` | +| `dashboards.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `20` | +| `dashboards.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `dashboards.livenessProbe.successThreshold` | Minimum consecutive successes for the 
probe to be considered successful after having failed (data nodes pod) | `1` | +| `dashboards.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `8` | +| `dashboards.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` | +| `dashboards.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `120` | +| `dashboards.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `dashboards.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `dashboards.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `dashboards.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `dashboards.customStartupProbe` | Override default startup probe | `{}` | +| `dashboards.customLivenessProbe` | Override default liveness probe | `{}` | +| `dashboards.customReadinessProbe` | Override default readiness probe | `{}` | +| `dashboards.command` | Override default container command (useful when using custom images) | `[]` | +| `dashboards.args` | Override default container args (useful when using custom images) | `[]` | +| `dashboards.lifecycleHooks` | for the data container(s) to automate configuration before or after startup | `{}` | +| `dashboards.extraEnvVars` | Array with extra environment variables to add to data nodes | `[]` | +| `dashboards.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data nodes | `""` | +| `dashboards.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data nodes | `""` | +| `dashboards.extraVolumes` | Optionally specify extra list of additional volumes for the data pod(s) | `[]` | +| `dashboards.extraVolumeMounts` | 
Optionally specify extra list of additional volumeMounts for the data container(s) | `[]` | +| `dashboards.sidecars` | Add additional sidecar containers to the data pod(s) | `[]` | +| `dashboards.initContainers` | Add additional init containers to the data pod(s) | `[]` | +| `dashboards.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `dashboards.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` | +| `dashboards.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | +| `dashboards.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `dashboards.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` | +| `dashboards.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `dashboards.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `dashboards.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `dashboards.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `dashboards.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `dashboards.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `dashboards.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `dashboards.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `dashboards.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `dashboards.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `dashboards.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `dashboards.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `dashboards.autoscaling.hpa.enabled` | Enable HPA for OpenSearch Dashboards | `false` | +| `dashboards.autoscaling.hpa.minReplicas` | Minimum number of OpenSearch Dashboards replicas | `3` | +| `dashboards.autoscaling.hpa.maxReplicas` | Maximum number of OpenSearch Dashboards replicas | `11` | +| `dashboards.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `dashboards.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `dashboards.tls.enabled` | Enable TLS for OpenSearch Dashboards webserver | `false` | +| `dashboards.tls.existingSecret` | Existing secret containing the certificates for OpenSearch Dashboards webserver | `""` | +| `dashboards.tls.autoGenerated` | Create self-signed TLS certificates. | `true` | +| `dashboards.persistence.enabled` | Enable persistence using Persistent Volume Claims | `false` | +| `dashboards.persistence.mountPath` | Path to mount the volume at. 
| `/drycc/opensearch-dashboards` | +| `dashboards.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services | `""` | +| `dashboards.persistence.storageClass` | Storage class of backing PVC | `""` | +| `dashboards.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `dashboards.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `dashboards.persistence.size` | Size of data volume | `8Gi` | +| `dashboards.persistence.existingClaim` | The name of an existing PVC to use for persistence | `""` | +| `dashboards.persistence.selector` | Selector to match an existing Persistent Volume for OpenSearch data PVC | `{}` | +| `dashboards.persistence.dataSource` | Custom PVC data source | `{}` | + +### OpenSearch Snapshots Parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `snapshots.enabled` | Enable automatic setup of repositories and snapshot policies | `false` | +| `snapshots.command` | Override default container command (useful when using custom images) | `[]` | +| `snapshots.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `snapshots.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `snapshots.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `snapshots.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `snapshots.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `snapshots.containerSecurityContext.privileged` | Set 
container's Security Context privileged | `false` | +| `snapshots.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `snapshots.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `snapshots.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `snapshots.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `snapshots.fullnameOverride` | String to fully override opensearch.snapshots.fullname | `""` | +| `snapshots.image.registry` | OpenSearch Snapshots image registry | `REGISTRY_NAME` | +| `snapshots.image.repository` | OpenSearch Snapshots image repository | `REPOSITORY_NAME/os-shell` | +| `snapshots.image.digest` | OpenSearch Snapshots image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `snapshots.image.pullPolicy` | OpenSearch Snapshots image pull policy | `IfNotPresent` | +| `snapshots.image.pullSecrets` | OpenSearch Snapshots image pull secrets | `[]` | +| `snapshots.image.debug` | Enable OpenSearch Snapshots image debug mode | `false` | +| `snapshots.nameOverride` | String to partially override common.names.fullname | `""` | +| `snapshots.persistence.enabled` | Enable persistence using Persistent Volume Claims | `false` | +| `snapshots.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteMany"]` | +| `snapshots.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `snapshots.persistence.dataSource` | Custom PVC data source | `{}` | +| `snapshots.persistence.existingClaim` | The name of an existing PVC to use for persistence | `""` | +| `snapshots.persistence.labels` | Extra labels for the Persistent Volume Claim | `{}` | +| `snapshots.persistence.selector` | Selector to match an existing Persistent Volume for 
OpenSearch data PVC | `{}` | +| `snapshots.persistence.size` | Size of data volume | `8Gi` | +| `snapshots.persistence.storageClass` | Storage class of backing PVC | `""` | +| `snapshots.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services | `""` | +| `snapshots.podSecurityContext.enabled` | Enabled data pods' Security Context | `true` | +| `snapshots.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `snapshots.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `snapshots.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `snapshots.podSecurityContext.fsGroup` | Set snapshots pod's Security Context fsGroup | `1001` | +| `snapshots.policies` | Each object represents a snapshot policy in YAML form, which will be converted to JSON and then passed as the HTTP body data to the OpenSearch REST API. | `SNAPSHOT_POLICY_API_BODY` | +| `snapshots.repositories` | Each object represents a snapshot repository in YAML form, which will be converted to JSON and then passed as the HTTP body data to the OpenSearch REST API. | `SNAPSHOT_REPO_API_BODY` | +| `snapshots.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if snapshots.resources is set (snapshots.resources is recommended for production). | `nano` | +| `snapshots.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + +```console +helm install my-release \ + --set name=my-open,client.service.port=8080 \ + oci://REGISTRY_NAME/REPOSITORY_NAME/opensearch +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the OpenSearch cluster name to `my-open` and REST port number to `8080`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/opensearch +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/opensearch/values.yaml). + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 2.0.0 + +This major version updates the Opensearch image and Opensearch Dashboard from version 2.x to 3.x. Follow the [official instructions](https://docs.opensearch.org/docs/latest/install-and-configure/upgrade-opensearch/index/) to upgrade to 3.x. 
+ +In addition, this new version of the chart removes the `metrics` related values: + +- `master.metrics.*` +- `data.metrics.*` +- `coordinating.metrics.*` +- `ingest.metrics.*` + +The reason for this change is because the bitnami/opensearch container 3.x no longer contains the prometheus-exporter plugin, which hasn't released a new version since Opensearch 2.17.x and is not supported in Opensearch 3.x. + +### To 1.5.0 + +This version introduces image verification for security purposes. To disable it, set `global.security.allowInsecureImages` to `true`. More details at [GitHub issue](https://github.com/bitnami/charts/issues/30850). + +### To 1.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + +## License + +Copyright © 2023 Drycc Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/NOTES.txt b/addons/opensearch/3.0/chart/opensearch-3.0/templates/NOTES.txt new file mode 100644 index 00000000..23cbe549 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/NOTES.txt @@ -0,0 +1,126 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if contains .Values.service.type "LoadBalancer" }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" you have most likely + exposed the Opensearch service externally. + + Please note that Opensearch does not implement a authentication + mechanism to secure your cluster. For security reasons, we strongly + suggest that you switch to "ClusterIP" or "NodePort". +------------------------------------------------------------------------------- +{{- end }} +{{- if not .Values.sysctlImage.enabled }} + +------------------------------------------------------------------------------- + WARNING + + Opensearch requires some changes in the kernel of the host machine to + work as expected. If those values are not set in the underlying operating + system, the OS containers fail to boot with ERROR messages. + + To check whether the host machine meets the requirements, run the command + below: + + kubectl logs --namespace {{ include "common.names.namespace" . }} $(kubectl get --namespace {{ include "common.names.namespace" . }} \ + pods -l app={{ template "common.names.name" . 
}},role=master -o jsonpath='{.items[0].metadata.name}') \ + opensearch + + You can adapt the Kernel parameters on your cluster as described in the + official documentation: + + https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster + + As an alternative, you can specify "sysctlImage.enabled=true" to use a + privileged initContainer to change those settings in the Kernel: + + helm upgrade --namespace {{ include "common.names.namespace" . }} {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/opensearch --set sysctlImage.enabled=true + + Note that this requires the ability to run privileged containers, which is likely not + the case on many secure clusters. To cover this use case, you can also set some parameters + in the config file to customize the default settings: + + https://www.open.co/guide/en/opensearch/reference/current/index-modules-store.html + https://www.open.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html + + For that, you can place the desired parameters by using the "config" block present in the values.yaml + +{{- else if .Values.sysctlImage.enabled }} + +------------------------------------------------------------------------------- + WARNING + + Opensearch requires some changes in the kernel of the host machine to + work as expected. If those values are not set in the underlying operating + system, the OS containers fail to boot with ERROR messages. + + More information about these requirements can be found in the links below: + + https://www.open.co/guide/en/opensearch/reference/current/file-descriptors.html + https://www.open.co/guide/en/opensearch/reference/current/vm-max-map-count.html + + This chart uses a privileged initContainer to change those settings in the Kernel + by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536 + +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ include "common.names.namespace" . }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/drycc/scripts/opensearch/entrypoint.sh /opt/drycc/scripts/opensearch/run.sh + +{{- else }} + + Opensearch can be accessed within the cluster on port {{ include "opensearch.service.ports.restAPI" . }} at {{ template "opensearch.service.name" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} + + To access from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "opensearch.service.name" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + curl http://$NODE_IP:$NODE_PORT/ +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "opensearch.service.name" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "opensearch.service.name" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") + curl http://$SERVICE_IP:{{ include "opensearch.service.ports.restAPI" . }}/ +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "opensearch.service.name" . }} {{ include "opensearch.service.ports.restAPI" . }}:9200 & + curl http://127.0.0.1:9200/ + +{{- end }} +{{- end }} + +{{ include "opensearch.validateValues" . }} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.sysctlImage }} +{{- include "common.warnings.rollingTag" .Values.dashboards.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.snapshots.image }} +{{- include "common.warnings.resources" (dict "sections" (list "coordinating" "dashboards" "data" "ingest" "master" "sysctlImage" "volumePermissions" "snapshots") "context" $) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.volumePermissions.image .Values.sysctlImage .Values.dashboards.image .Values.snapshots.image) "context" $) }} +{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.volumePermissions.image .Values.sysctlImage .Values.dashboards.image .Values.snapshots.image) "context" $) }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/_helpers.tpl b/addons/opensearch/3.0/chart/opensearch-3.0/templates/_helpers.tpl new file mode 100644 index 00000000..bdaba687 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/_helpers.tpl @@ -0,0 +1,859 @@ +{{/* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper OS image name +*/}} +{{- define "opensearch.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper sysctl image name +*/}} +{{- define "opensearch.sysctl.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sysctlImage "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "opensearch.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper OpenSearch Dashboards image name +*/}} +{{- define "opensearch.dashboards.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.dashboards.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper OpenSearch Snapshots image name +*/}} +{{- define "opensearch.snapshots.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.snapshots.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "opensearch.imagePullSecrets" -}} +{{ include "common.images.renderPullSecrets" (dict "images" (list .Values.image .Values.sysctlImage .Values.volumePermissions.image .Values.dashboards.image .Values.snapshots.image) "context" $) }} +{{- end -}} + +{{/* +Return the proper sysctl image name +*/}} +{{- define "opensearch.sysctl.initContainer" -}} +## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) +- name: sysctl + image: {{ include "opensearch.sysctl.image" . 
}} + imagePullPolicy: {{ .Values.sysctlImage.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "opensearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "opensearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- else if ne .Values.sysctlImage.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.sysctlImage.resourcesPreset) | nindent 12 }} + {{- end }} +{{- end -}} + +{{/* +Return the copy plugins init container definition +*/}} +{{- define "opensearch.copy-default-plugins.initContainer" -}} +{{- $block := index .context.Values .component }} +- name: copy-default-plugins + image: {{ include "opensearch.image" .context }} + imagePullPolicy: {{ .context.Values.image.pullPolicy | quote }} + {{- if $block.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" $block.containerSecurityContext "context" .context) | nindent 12 }} + {{- end }} + {{- if $block.resources }} + resources: {{- toYaml $block.resources | nindent 12 }} + {{- else if ne $block.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" $block.resourcesPreset) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + . /opt/drycc/scripts/liblog.sh + . /opt/drycc/scripts/libfs.sh + . /opt/drycc/scripts/opensearch-env.sh + + mkdir -p /emptydir/app-conf-dir /emptydir/app-plugins-dir + info "Copying directories to empty dir" + + if ! is_dir_empty "$DB_DEFAULT_CONF_DIR"; then + info "Copying default configuration" + cp -nr --preserve=mode "$DB_DEFAULT_CONF_DIR"/* /emptydir/app-conf-dir + fi + if ! 
is_dir_empty "$DB_DEFAULT_PLUGINS_DIR"; then + info "Copying default plugins" + cp -nr "$DB_DEFAULT_PLUGINS_DIR"/* /emptydir/app-plugins-dir + fi + + info "Copy operation completed" + volumeMounts: + - name: empty-dir + mountPath: /emptydir +{{- end -}} + +{{/* +Return the copy plugins init container definition +*/}} +{{- define "opensearch.dashboards.copy-default-plugins.initContainer" -}} +- name: copy-default-plugins + image: {{ include "opensearch.dashboards.image" . }} + imagePullPolicy: {{ .Values.dashboards.image.pullPolicy | quote }} + {{- if .Values.dashboards.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.dashboards.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dashboards.resources }} + resources: {{- toYaml .Values.dashboards.resources | nindent 12 }} + {{- else if ne .Values.dashboards.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.dashboards.resourcesPreset) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + #!/bin/bash + + . /opt/drycc/scripts/libfs.sh + . /opt/drycc/scripts/opensearch-dashboards-env.sh + + if ! is_dir_empty "$SERVER_DEFAULT_PLUGINS_DIR"; then + cp -nr "$SERVER_DEFAULT_PLUGINS_DIR"/* /plugins + fi + volumeMounts: + - name: empty-dir + mountPath: /plugins + subPath: app-plugins-dir +{{- end -}} + +{{/* +Set OpenSearch Dashboards PVC. +*/}} +{{- define "opensearch.dashboards.pvc" -}} +{{- .Values.dashboards.persistence.existingClaim | default (include "opensearch.dashboards.fullname" .) -}} +{{- end -}} + +{{/* +Name for the OpenSearch service +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.service.name" -}} +{{- include "common.names.fullname" . 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Port number for the OpenSearch service REST API port +*/}} +{{- define "opensearch.service.ports.restAPI" -}} +{{- printf "%d" (int .Values.service.ports.restAPI) -}} +{{- end -}} + +{{/* +Create a default fully qualified master name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.master.fullname" -}} +{{- $name := default "master" .Values.master.nameOverride -}} +{{- if .Values.master.fullnameOverride -}} +{{- .Values.master.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default master service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.master.servicename" -}} +{{- $name := coalesce .Values.master.service.headless.nameOverride .Values.master.servicenameOverride | default "" -}} +{{- default (printf "%s-hl" (include "opensearch.master.fullname" .)) (tpl $name .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified coordinating name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.coordinating.fullname" -}} +{{- $name := default "coordinating" .Values.coordinating.nameOverride -}} +{{- if .Values.coordinating.fullnameOverride -}} +{{- .Values.coordinating.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default coordinating service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "opensearch.coordinating.servicename" -}} +{{- $name := coalesce .Values.coordinating.service.headless.nameOverride .Values.coordinating.servicenameOverride | default "" -}} +{{- default (printf "%s-hl" (include "opensearch.coordinating.fullname" .)) (tpl $name .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified data name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.data.fullname" -}} +{{- $name := default "data" .Values.data.nameOverride -}} +{{- if .Values.data.fullnameOverride -}} +{{- .Values.data.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default data service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.data.servicename" -}} +{{- $name := coalesce .Values.data.service.headless.nameOverride .Values.data.servicenameOverride | default "" -}} +{{- default (printf "%s-hl" (include "opensearch.data.fullname" .)) (tpl $name .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified ingest name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.ingest.fullname" -}} +{{- $name := default "ingest" .Values.ingest.nameOverride -}} +{{- if .Values.ingest.fullnameOverride -}} +{{- .Values.ingest.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default ingest service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "opensearch.ingest.servicename" -}} +{{- $name := coalesce .Values.ingest.service.headless.nameOverride .Values.ingest.servicenameOverride | default "" -}} +{{- default (printf "%s-hl" (include "opensearch.ingest.fullname" .)) (tpl $name .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Returns true if at least one master-eligible node replica has been configured. +*/}} +{{- define "opensearch.master.enabled" -}} +{{- if or .Values.master.autoscaling.hpa.enabled (gt (int .Values.master.replicaCount) 0) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one coordinating-only node replica has been configured. +*/}} +{{- define "opensearch.coordinating.enabled" -}} +{{- if or .Values.coordinating.autoscaling.hpa.enabled (gt (int .Values.coordinating.replicaCount) 0) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one data-only node replica has been configured. +*/}} +{{- define "opensearch.data.enabled" -}} +{{- if or .Values.data.autoscaling.hpa.enabled (gt (int .Values.data.replicaCount) 0) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one ingest-only node replica has been configured. +*/}} +{{- define "opensearch.ingest.enabled" -}} +{{- if and .Values.ingest.enabled (or .Values.ingest.autoscaling.hpa.enabled (gt (int .Values.ingest.replicaCount) 0)) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one dashboards replica has been configured. +*/}} +{{- define "opensearch.dashboards.enabled" -}} +{{- if and .Values.dashboards.enabled (or .Values.dashboards.autoscaling.hpa.enabled (gt (int .Values.dashboards.replicaCount) 0)) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the hostname of every OpenSearch seed node +*/}} +{{- define "opensearch.hosts" -}} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $releaseNamespace := include "common.names.namespace" . 
}} +{{- if (include "opensearch.master.enabled" .) -}} +{{- $masterFullname := include "opensearch.master.servicename" .}} +{{- $masterFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if (include "opensearch.coordinating.enabled" .) -}} +{{- $coordinatingFullname := include "opensearch.coordinating.servicename" .}} +{{- $coordinatingFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if (include "opensearch.data.enabled" .) -}} +{{- $dataFullname := include "opensearch.data.servicename" .}} +{{- $dataFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if (include "opensearch.ingest.enabled" .) -}} +{{- $ingestFullname := include "opensearch.ingest.servicename" .}} +{{- $ingestFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- range .Values.extraHosts }} +{{- . }}, +{{- end }} +{{- end -}} + +{{/* +Get the initialization scripts volume name. +*/}} +{{- define "opensearch.initScripts" -}} +{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "opensearch.initScriptsCM" -}} +{{- print (tpl .Values.initScriptsCM .) -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "opensearch.initScriptsSecret" -}} +{{- print (tpl .Values.initScriptsSecret .) -}} +{{- end -}} + +{{/* +Create the name of the master service account to use +*/}} +{{- define "opensearch.master.serviceAccountName" -}} +{{- if .Values.master.serviceAccount.create -}} + {{ default (include "opensearch.master.fullname" .) 
.Values.master.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.master.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the coordinating-only service account to use +*/}} +{{- define "opensearch.coordinating.serviceAccountName" -}} +{{- if .Values.coordinating.serviceAccount.create -}} + {{ default (include "opensearch.coordinating.fullname" .) .Values.coordinating.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.coordinating.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the data service account to use +*/}} +{{- define "opensearch.data.serviceAccountName" -}} +{{- if .Values.data.serviceAccount.create -}} + {{ default (include "opensearch.data.fullname" .) .Values.data.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.data.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the ingest service account to use +*/}} +{{- define "opensearch.ingest.serviceAccountName" -}} +{{- if .Values.ingest.serviceAccount.create -}} + {{ default (include "opensearch.ingest.fullname" .) .Values.ingest.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.ingest.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret for typed nodes. +*/}} +{{- define "opensearch.node.tlsSecretName" -}} +{{- $secretName := index .context.Values.security.tls .nodeRole "existingSecret" -}} +{{- if $secretName -}} + {{- print (tpl $secretName .context) -}} +{{- else -}} + {{- printf "%s-crt" (include (printf "opensearch.%s.fullname" .nodeRole) .context) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret items for typed nodes. 
+*/}} +{{- define "opensearch.node.tlsSecretItems" -}} +{{- $items := list }} +{{- $items = append $items (dict "key" (include "opensearch.node.tlsSecretCertKey" (dict "nodeRole" .nodeRole "context" .context)) "path" "tls.crt") }} +{{- $items = append $items (dict "key" (include "opensearch.node.tlsSecretKeyKey" (dict "nodeRole" .nodeRole "context" .context)) "path" "tls.key") }} +{{- $items = append $items (dict "key" (include "opensearch.node.tlsSecretCAKey" (dict "nodeRole" .nodeRole "context" .context)) "path" "ca.crt") }} +{{ $items | toYaml }} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret key of the certificate for typed nodes. +*/}} +{{- define "opensearch.node.tlsSecretCertKey" -}} +{{- include "opensearch.tlsSecretKey" (dict "type" .nodeRole "secretKey" "certKey" "defaultKey" "tls.crt" "context" .context) -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret key of the certificates key for typed nodes. +*/}} +{{- define "opensearch.node.tlsSecretKeyKey" -}} +{{- include "opensearch.tlsSecretKey" (dict "type" .nodeRole "secretKey" "keyKey" "defaultKey" "tls.key" "context" .context) -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret key of the ca certificate for typed nodes. +*/}} +{{- define "opensearch.node.tlsSecretCAKey" -}} +{{- include "opensearch.tlsSecretKey" (dict "type" .nodeRole "secretKey" "caKey" "defaultKey" "ca.crt" "context" .context) -}} +{{- end -}} + +{{/* +Return the opensearch admin TLS credentials secret for all nodes. +*/}} +{{- define "opensearch.admin.tlsSecretName" -}} +{{- $secretName := .context.Values.security.tls.admin.existingSecret -}} +{{- if $secretName -}} + {{- print (tpl $secretName .context) -}} +{{- else -}} + {{- printf "%s-admin-crt" (include "common.names.fullname" .context) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret items for all nodes. 
+*/}} +{{- define "opensearch.admin.tlsSecretItems" -}} +{{- $items := list }} +{{- $items = append $items (dict "key" (include "opensearch.admin.tlsSecretCertKey" (dict "context" .context)) "path" "admin.crt") }} +{{- $items = append $items (dict "key" (include "opensearch.admin.tlsSecretKeyKey" (dict "context" .context)) "path" "admin.key") }} +{{ $items | toYaml }} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret key of the certificate for all nodes. +*/}} +{{- define "opensearch.admin.tlsSecretCertKey" -}} +{{- include "opensearch.tlsSecretKey" (dict "type" "admin" "secretKey" "certKey" "defaultKey" "admin.crt" "context" .context) -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret key of the certificates key for all nodes. +*/}} +{{- define "opensearch.admin.tlsSecretKeyKey" -}} +{{- include "opensearch.tlsSecretKey" (dict "type" "admin" "secretKey" "keyKey" "defaultKey" "admin.key" "context" .context) -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret key of the given type. 
+*/}} +{{- define "opensearch.tlsSecretKey" -}} +{{- $secretConfig := index .context.Values.security.tls .type -}} +{{- if $secretConfig.existingSecret }} +{{- print (index $secretConfig .secretKey | default .defaultKey) }} +{{- else }} +{{- print .defaultKey }} +{{- end }} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "opensearch.createTlsSecret" -}} +{{- if and .Values.security.enabled .Values.security.tls.autoGenerated (not (include "opensearch.security.tlsSecretsProvided" .)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if an authentication credentials secret object should be created +*/}} +{{- define "opensearch.createSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the OpenSearch authentication credentials secret name +*/}} +{{- define "opensearch.secretName" -}} +{{- if .Values.security.existingSecret -}} + {{- print (tpl .Values.security.existingSecret $) -}} +{{- else -}} + {{- print (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS password secret object should be created +*/}} +{{- define "opensearch.createTlsPasswordsSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.tls.passwordsSecret) (or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword ) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the OpenSearch TLS password secret name +*/}} +{{- define "opensearch.tlsPasswordsSecret" -}} +{{- if .Values.security.tls.passwordsSecret -}} + {{- print (tpl .Values.security.tls.passwordsSecret .) -}} +{{- else -}} + {{- printf "%s-tls-pass" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Returns the name of the secret key containing the Keystore password +*/}} +{{- define "opensearch.keystorePasswordKey" -}} +{{- default "keystore-password" (tpl .Values.security.tls.secretKeystoreKey .) -}} +{{- end -}} + +{{/* +Returns the name of the secret key containing the Truststore password +*/}} +{{- define "opensearch.truststorePasswordKey" -}} +{{- default "truststore-password" (tpl .Values.security.tls.secretTruststoreKey .) -}} +{{- end -}} + +{{/* +Returns the name of the secret key containing the PEM key password +*/}} +{{- define "opensearch.keyPasswordKey" -}} +{{- default "key-password" (tpl .Values.security.tls.secretKey .) -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "opensearch.configure.security" -}} +{{- $nodesDN := list }} +{{- if and (include "opensearch.master.enabled" .) }} +{{- $nodesDN = append $nodesDN (printf "CN=%s" (include "opensearch.master.fullname" .))}} +{{- end }} +{{- if and (include "opensearch.data.enabled" .) }} +{{- $nodesDN = append $nodesDN (printf "CN=%s" (include "opensearch.data.fullname" .))}} +{{- end }} +{{- if and (include "opensearch.coordinating.enabled" .) }} +{{- $nodesDN = append $nodesDN (printf "CN=%s" (include "opensearch.coordinating.fullname" .))}} +{{- end }} +{{- if and (include "opensearch.ingest.enabled" .) 
}} +{{- $nodesDN = append $nodesDN (printf "CN=%s" (include "opensearch.ingest.fullname" .))}} +{{- end }} +- name: OPENSEARCH_SECURITY_NODES_DN + value: {{ coalesce .Values.security.tls.nodesDN ( join ";" $nodesDN ) }} +- name: OPENSEARCH_SECURITY_ADMIN_DN + value: {{ coalesce .Values.security.tls.adminDN "CN=admin;CN=admin" }} +- name: OPENSEARCH_ENABLE_SECURITY + value: "true" +{{- if .Values.usePasswordFiles }} +- name: OPENSEARCH_PASSWORD_FILE + value: "/opt/drycc/opensearch/secrets/opensearch-password" +- name: OPENSEARCH_DASHBOARDS_PASSWORD_FILE + value: "/opt/drycc/opensearch/secrets/opensearch-dashboards-password" +- name: LOGSTASH_PASSWORD_FILE + value: "/opt/drycc/opensearch/secrets/logstash-password" +{{- else }} +- name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.secretName" . }} + key: opensearch-password +- name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.secretName" . }} + key: opensearch-dashboards-password +- name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.secretName" . }} + key: logstash-password +{{- end }} +- name: OPENSEARCH_ENABLE_FIPS_MODE + value: {{ .Values.security.fipsMode | quote }} +- name: OPENSEARCH_TLS_VERIFICATION_MODE + value: {{ .Values.security.tls.verificationMode | quote }} +- name: OPENSEARCH_ENABLE_REST_TLS + value: {{ ternary "true" "false" .Values.security.tls.restEncryption | quote }} +{{- if or (include "opensearch.createTlsSecret" .) 
.Values.security.tls.usePemCerts }} +- name: OPENSEARCH_TLS_USE_PEM + value: "true" +{{- else }} +- name: OPENSEARCH_KEYSTORE_LOCATION + value: "/opt/drycc/opensearch/config/certs/{{ .Values.security.tls.keystoreFilename }}" +- name: OPENSEARCH_TRUSTSTORE_LOCATION + value: "/opt/drycc/opensearch/config/certs/{{ .Values.security.tls.truststoreFilename }}" +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.keystorePassword .Values.security.tls.passwordsSecret) }} +{{- if .Values.usePasswordFiles }} +- name: OPENSEARCH_KEYSTORE_PASSWORD_FILE + value: {{ printf "/opt/drycc/opensearch/secrets/%s" (include "opensearch.keystorePasswordKey" .) }} +{{- else }} +- name: OPENSEARCH_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.tlsPasswordsSecret" . }} + key: {{ include "opensearch.keystorePasswordKey" . | quote }} +{{- end }} +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.truststorePassword .Values.security.tls.passwordsSecret) }} +{{- if .Values.usePasswordFiles }} +- name: OPENSEARCH_TRUSTSTORE_PASSWORD_FILE + value: {{ printf "/opt/drycc/opensearch/secrets/%s" (include "opensearch.truststorePasswordKey" .) }} +{{- else }} +- name: OPENSEARCH_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.tlsPasswordsSecret" . }} + key: {{ include "opensearch.truststorePasswordKey" . | quote }} +{{- end }} +{{- end }} +{{- if and .Values.security.tls.usePemCerts (or .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret) }} +{{- if .Values.usePasswordFiles }} +- name: OPENSEARCH_KEY_PASSWORD_FILE + value: {{ printf "/opt/drycc/opensearch/secrets/%s" (include "opensearch.keyPasswordKey" .) }} +{{- else }} +- name: OPENSEARCH_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.tlsPasswordsSecret" . }} + key: {{ include "opensearch.keyPasswordKey" . 
| quote }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns true if at least 1 existing secret was provided +*/}} +{{- define "opensearch.security.tlsSecretsProvided" -}} +{{- $masterSecret := (and (include "opensearch.master.enabled" .) .Values.security.tls.master.existingSecret) -}} +{{- $coordinatingSecret := (and (include "opensearch.coordinating.enabled" .) .Values.security.tls.coordinating.existingSecret) -}} +{{- $dataSecret := (and (include "opensearch.data.enabled" .) .Values.security.tls.data.existingSecret) -}} +{{- $ingestSecret := (and (include "opensearch.ingest.enabled" .) .Values.security.tls.ingest.existingSecret) -}} +{{- if or $masterSecret $coordinatingSecret $dataSecret $ingestSecret }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of OpenSearch - Existing secret not provided for master nodes */}} +{{- define "opensearch.validateValues.security.missingTlsSecrets.master" -}} +{{- $masterSecret := (and (include "opensearch.master.enabled" .) (not .Values.security.tls.master.existingSecret)) -}} +{{- if and .Values.security.enabled (include "opensearch.security.tlsSecretsProvided" .) $masterSecret -}} +opensearch: security.tls.master.existingSecret + Missing secret containing the TLS certificates for the OpenSearch master nodes. + Provide the certificates using --set .security.tls.master.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of OpenSearch - Existing secret not provided for coordinating-only nodes */}} +{{- define "opensearch.validateValues.security.missingTlsSecrets.coordinating" -}} +{{- $coordinatingSecret := (and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret)) -}} +{{- if and .Values.security.enabled (include "opensearch.security.tlsSecretsProvided" .) $coordinatingSecret -}} +opensearch: security.tls.coordinating.existingSecret + Missing secret containing the TLS certificates for the OpenSearch coordinating-only nodes. 
+    Provide the certificates using --set security.tls.coordinating.existingSecret="my-secret".
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of OpenSearch - Existing secret not provided for data nodes */}}
+{{- define "opensearch.validateValues.security.missingTlsSecrets.data" -}}
+{{- $dataSecret := (and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret)) -}}
+{{- if and .Values.security.enabled (include "opensearch.security.tlsSecretsProvided" .) $dataSecret -}}
+opensearch: security.tls.data.existingSecret
+    Missing secret containing the TLS certificates for the OpenSearch data nodes.
+    Provide the certificates using --set security.tls.data.existingSecret="my-secret".
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of OpenSearch - Existing secret not provided for ingest nodes */}}
+{{- define "opensearch.validateValues.security.missingTlsSecrets.ingest" -}}
+{{- $ingestSecret := (and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret)) -}}
+{{- if and .Values.security.enabled (include "opensearch.security.tlsSecretsProvided" .) $ingestSecret -}}
+opensearch: security.tls.ingest.existingSecret
+    Missing secret containing the TLS certificates for the OpenSearch ingest nodes.
+    Provide the certificates using --set security.tls.ingest.existingSecret="my-secret".
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of OpenSearch - TLS enabled but no certificates provided */}}
+{{- define "opensearch.validateValues.security.tls" -}}
+{{- if and .Values.security.enabled (not .Values.security.tls.autoGenerated) (not (include "opensearch.security.tlsSecretsProvided" .)) -}}
+opensearch: security.tls
+    In order to enable the OpenSearch security plugin, it is necessary to configure TLS. 
+    Three different mechanisms can be used:
+      - Provide an existing secret containing the Keystore and Truststore for each role
+      - Provide an existing secret containing the PEM certificates for each role and enable `security.tls.usePemCerts=true`
+      - Enable using auto-generated certificates with `security.tls.autoGenerated=true`
+    Existing secrets containing either JKS/PKCS12 or PEM certificates can be provided using --set security.tls.master.existingSecret=master-certs,
+    --set security.tls.data.existingSecret=data-certs, --set security.tls.coordinating.existingSecret=coordinating-certs, --set security.tls.ingest.existingSecret=ingest-certs
+{{- end -}}
+{{- end -}}
+
+{{/* Validate that at least one OpenSearch master node is configured */}}
+{{- define "opensearch.validateValues.master.replicas" -}}
+{{- if not (include "opensearch.master.enabled" .) -}}
+opensearch: master.replicas
+    OpenSearch needs at least one master-eligible node to form a cluster.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "opensearch.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "opensearch.validateValues.master.replicas" .) -}}
+{{- $messages := append $messages (include "opensearch.validateValues.security.tls" .) -}}
+{{- $messages := append $messages (include "opensearch.validateValues.security.missingTlsSecrets.master" .) -}}
+{{- $messages := append $messages (include "opensearch.validateValues.security.missingTlsSecrets.data" .) -}}
+{{- $messages := append $messages (include "opensearch.validateValues.security.missingTlsSecrets.coordinating" .) -}}
+{{- $messages := append $messages (include "opensearch.validateValues.security.missingTlsSecrets.ingest" .) 
-}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Sysctl set if less than
+*/}}
+{{- define "opensearch.sysctlIfLess" -}}
+CURRENT=`sysctl -n {{ .key }}`;
+DESIRED="{{ .value }}";
+if [ "$DESIRED" -gt "$CURRENT" ]; then
+  sysctl -w {{ .key }}={{ .value }};
+fi;
+{{- end -}}
+
+{{/*
+Create a default fully qualified dashboards name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "opensearch.dashboards.fullname" -}}
+{{- $name := default "dashboards" .Values.dashboards.nameOverride -}}
+{{- if .Values.dashboards.fullnameOverride -}}
+{{- .Values.dashboards.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the dashboards service account to use
+*/}}
+{{- define "opensearch.dashboards.serviceAccountName" -}}
+{{- if .Values.dashboards.serviceAccount.create -}}
+    {{ default (include "opensearch.dashboards.fullname" .) .Values.dashboards.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.dashboards.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default Dashboards service name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "opensearch.dashboards.servicename" -}}
+{{- $name := coalesce .Values.dashboards.service.nameOverride .Values.dashboards.servicenameOverride | default "" -}}
+{{- default (include "opensearch.dashboards.fullname" .) (tpl $name .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Set OpenSearch URL. 
+*/}} +{{- define "opensearch.url" -}} +{{- $protocol := ternary "https" "http" .Values.security.tls.restEncryption -}} +{{- printf "%s://%s:%s" $protocol (include "opensearch.service.name" .) (include "opensearch.service.ports.restAPI" .) -}} +{{- end -}} + +{{/* +Return the opensearch TLS credentials secret for Dashboards UI. +*/}} +{{- define "opensearch.dashboards.tlsSecretName" -}} +{{- $secretName := .Values.dashboards.tls.existingSecret -}} +{{- if $secretName -}} + {{- print (tpl $secretName .) -}} +{{- else -}} + {{- printf "%s-crt" (include "opensearch.dashboards.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "opensearch.dashboards.createTlsSecret" -}} +{{- if and .Values.dashboards.tls.enabled .Values.dashboards.tls.autoGenerated (not .Values.dashboards.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified snapshots name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "opensearch.snapshots.fullname" -}} +{{- $name := default "snapshots" .Values.snapshots.nameOverride -}} +{{- if .Values.snapshots.fullnameOverride -}} +{{- .Values.snapshots.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a global mount path for snapshots volume based on repo path +*/}} +{{- define "opensearch.snapshots.mountPath" -}} +{{- required "Value snapshotRepoPath must be set!" 
$.Values.snapshotRepoPath -}} +{{- end -}} + +{{/* +Create name for snapshot API repo data ConfigMap +*/}} +{{- define "opensearch.snapshots.repoDataConfigMap" -}} +{{- printf "%s-repo-data" (include "opensearch.snapshots.fullname" $) -}} +{{- end -}} + +{{/* +Create name for snapshot API policy data ConfigMap +*/}} +{{- define "opensearch.snapshots.policyDataConfigMap" -}} +{{- printf "%s-policy-data" (include "opensearch.snapshots.fullname" $) -}} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/configmap.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/configmap.yaml new file mode 100644 index 00000000..20ab346c --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/configmap.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or .Values.config .Values.extraConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if .Values.config }} + opensearch.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.config "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.extraConfig }} + my_opensearch.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.extraConfig "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/hpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/hpa.yaml new file mode 100644 index 00000000..00b68aaa --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.coordinating.enabled" .) .Values.coordinating.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "opensearch.coordinating.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "opensearch.coordinating.fullname" . 
}} + minReplicas: {{ .Values.coordinating.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.coordinating.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.coordinating.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.coordinating.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.coordinating.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.coordinating.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/metrics-svc.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/metrics-svc.yaml new file mode 100644 index 00000000..0a446eb6 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and ( include "opensearch.coordinating.enabled" . ) .Values.coordinating.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "opensearch.coordinating.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: coordinating-only + {{- $defaultAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" (.Values.coordinating.metrics.service.port | quote) "prometheus.io/path" "/_prometheus/metrics" }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list $defaultAnnotations .Values.coordinating.metrics.service.annotations .Values.commonAnnotations) "context" .) 
}} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +spec: + type: ClusterIP + {{- if .Values.coordinating.metrics.service.clusterIP }} + clusterIP: {{ .Values.coordinating.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + targetPort: rest-api + port: {{ .Values.coordinating.metrics.service.ports.metrics }} + protocol: TCP + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/networkpolicy.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/networkpolicy.yaml new file mode 100644 index 00000000..c281f6de --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/networkpolicy.yaml @@ -0,0 +1,91 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.coordinating.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "opensearch.coordinating.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: coordinating-only + policyTypes: + - Ingress + - Egress + {{- if .Values.coordinating.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.restAPI }} + - port: {{ .Values.service.ports.transport }} + - port: {{ .Values.containerPorts.restAPI }} + - port: {{ .Values.containerPorts.transport }} + - port: {{ .Values.dashboards.service.ports.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.coordinating.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.containerPorts.restAPI }} + - port: {{ .Values.containerPorts.transport }} + {{- if not .Values.coordinating.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.coordinating.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.coordinating.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.coordinating.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.coordinating.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.coordinating.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/pdb.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/pdb.yaml new file mode 100644 index 00000000..cc95a9ba --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and ( include "opensearch.coordinating.enabled" . ) .Values.coordinating.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "opensearch.coordinating.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.coordinating.pdb.minAvailable }} + minAvailable: {{ .Values.coordinating.pdb.minAvailable }} + {{- end }} + {{- if or .Values.coordinating.pdb.maxUnavailable (not .Values.coordinating.pdb.minAvailable) }} + maxUnavailable: {{ .Values.coordinating.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: coordinating-only +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/serviceaccount.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/serviceaccount.yaml new file mode 100644 index 00000000..286179b9 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.coordinating.enabled" .) .Values.coordinating.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opensearch.coordinating.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if or .Values.coordinating.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.coordinating.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/servicemonitor.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/servicemonitor.yaml new file mode 100644 index 00000000..5b81dba9 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and ( include "opensearch.coordinating.enabled" . ) .Values.coordinating.metrics.enabled .Values.coordinating.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "opensearch.coordinating.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.coordinating.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" (dict "values" (list .Values.coordinating.metrics.serviceMonitor.labels .Values.commonLabels) "context" .) 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: coordinating-only + {{- if or .Values.coordinating.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.coordinating.metrics.serviceMonitor.annotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.coordinating.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: coordinating-only + prometheus.io/scrape: "true" + {{- if .Values.coordinating.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/_prometheus/metrics" + {{- if .Values.coordinating.metrics.serviceMonitor.interval }} + interval: {{ .Values.coordinating.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.coordinating.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.coordinating.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.coordinating.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.coordinating.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.coordinating.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if 
.Values.coordinating.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/statefulset.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/statefulset.yaml new file mode 100644 index 00000000..329738e6 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/statefulset.yaml @@ -0,0 +1,347 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.coordinating.enabled" .) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "opensearch.coordinating.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only + {{- end }} + {{- if or .Values.coordinating.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.coordinating.autoscaling.hpa.enabled }} + replicas: {{ .Values.coordinating.replicaCount }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.coordinating.updateStrategy }} + updateStrategy: {{- toYaml .Values.coordinating.updateStrategy | nindent 4 }} + {{- end }} + serviceName: {{ include "opensearch.coordinating.servicename" . }} + podManagementPolicy: {{ .Values.coordinating.podManagementPolicy }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only + {{- end }} + annotations: + {{- if and (include "opensearch.createTlsSecret" .) (not .Values.security.tls.coordinating.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.coordinating.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "opensearch.coordinating.serviceAccountName" . }} + {{- include "opensearch.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.coordinating.automountServiceAccountToken }} + {{- if .Values.coordinating.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAffinityPreset "component" "coordinating-only" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAntiAffinityPreset "component" "coordinating-only" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.coordinating.nodeAffinityPreset.type "key" .Values.coordinating.nodeAffinityPreset.key "values" .Values.coordinating.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.coordinating.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.schedulerName }} + schedulerName: {{ .Values.coordinating.schedulerName }} + {{- end }} + {{- if .Values.coordinating.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.priorityClassName }} + priorityClassName: {{ .Values.coordinating.priorityClassName | quote }} + {{- end }} + {{- if 
.Values.coordinating.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.coordinating.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.coordinating.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.coordinating.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + {{- include "opensearch.sysctl.initContainer" . | nindent 8}} + {{- end }} + {{- include "opensearch.copy-default-plugins.initContainer" (dict "component" "coordinating" "context" $) | nindent 8 }} + {{- if .Values.coordinating.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: opensearch + image: {{ include "opensearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.coordinating.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.coordinating.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.coordinating.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.coordinating.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: OPENSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: OPENSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: OPENSEARCH_NODE_ROLES + value: {{ .Values.coordinating.extraRoles | join "," | quote }} + - name: OPENSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: OPENSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: OPENSEARCH_CLUSTER_HOSTS + value: {{ include "opensearch.hosts" . 
| quote }} + - name: OPENSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) (ternary .Values.data.autoscaling.hpa.minReplicas .Values.data.replicaCount .Values.data.autoscaling.hpa.enabled) | quote }} + - name: OPENSEARCH_CLUSTER_MASTER_HOSTS + {{- $opensearchMasterFullname := include "opensearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $opensearchMasterFullname $e }} {{ end }} + - name: OPENSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) 2) 1 | quote }} + - name: OPENSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "opensearch.coordinating.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: OPENSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.coordinating.heapSize }} + - name: OPENSEARCH_HEAP_SIZE + value: {{ .Values.coordinating.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "opensearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.coordinating.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.coordinating.extraEnvVarsCM .Values.coordinating.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.coordinating.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.coordinating.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.coordinating.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.coordinating.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.coordinating.customLivenessProbe }} + livenessProbe: {{- include 
"common.tplvalues.render" (dict "value" .Values.coordinating.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.coordinating.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.coordinating.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.coordinating.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /opt/drycc/scripts/opensearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.coordinating.resources }} + resources: {{- toYaml .Values.coordinating.resources | nindent 12 }} + {{- else if ne .Values.coordinating.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.coordinating.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/config + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/plugins + subPath: app-plugins-dir + - name: data + mountPath: /drycc/opensearch/data + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + mountPath: /opt/drycc/opensearch/secrets + {{- end }} + {{- if .Values.config }} + - mountPath: /opt/drycc/opensearch/config/opensearch.yml + name: config + subPath: opensearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: 
/opt/drycc/opensearch/config/my_opensearch.yml + name: config + subPath: my_opensearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + mountPath: /opt/drycc/opensearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.snapshots.enabled }} + - mountPath: {{ include "opensearch.snapshots.mountPath" . }} + {{- if .Values.snapshots.persistence.enabled }} + name: snapshots + {{- else }} + name: empty-dir + subPath: app-snapshots-dir + {{- end }} + {{- end }} + {{- if .Values.coordinating.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: "data" + emptyDir: {} + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + projected: + sources: + - secret: + name: {{ include "opensearch.secretName" . 
}} + {{- if or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret }} + - secret: + name: {{ include "opensearch.tlsPasswordsSecret" . }} + {{- end }} + {{- end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "opensearch.node.tlsSecretName" (dict "nodeRole" "coordinating" "context" $) }} + items: {{- include "opensearch.node.tlsSecretItems" (dict "nodeRole" "coordinating" "context" $) | nindent 20 }} + - secret: + name: {{ include "opensearch.admin.tlsSecretName" (dict "context" $) }} + items: {{- include "opensearch.admin.tlsSecretItems" (dict "context" $) | nindent 20 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "opensearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "opensearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "opensearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + {{- if and .Values.snapshots.enabled .Values.snapshots.persistence.enabled }} + - name: snapshots + persistentVolumeClaim: + claimName: {{ include "opensearch.snapshots.fullname" . 
}} + {{- end }} + {{- if .Values.coordinating.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/svc-headless.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/svc-headless.yaml new file mode 100644 index 00000000..0dcc39e3 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/svc-headless.yaml @@ -0,0 +1,35 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.coordinating.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opensearch.coordinating.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if or .Values.coordinating.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if semverCompare ">=1.31-0" (include "common.capabilities.kubeVersion" .) }} + trafficDistribution: {{ .Values.coordinating.service.headless.trafficDistribution }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/vpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/vpa.yaml new file mode 100644 index 00000000..51adaf80 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/coordinating/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.coordinating.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "opensearch.coordinating.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: coordinating-only + {{- if or .Values.coordinating.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.coordinating.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: opensearch + {{- with .Values.coordinating.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.coordinating.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.coordinating.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ include "opensearch.coordinating.fullname" . }} + {{- if .Values.coordinating.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.coordinating.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/deployment.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/deployment.yaml new file mode 100644 index 00000000..13664cbc --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/deployment.yaml @@ -0,0 +1,239 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.dashboards.enabled" .) }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "opensearch.dashboards.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.dashboards.autoscaling.hpa.enabled }} + replicas: {{ .Values.dashboards.replicaCount }} + {{- end }} + {{- if .Values.dashboards.updateStrategy }} + strategy: {{- toYaml .Values.dashboards.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: dashboards + template: + metadata: + {{- if .Values.dashboards.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: dashboards + spec: + serviceAccountName: {{ template "opensearch.dashboards.serviceAccountName" . }} + {{- include "opensearch.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.dashboards.automountServiceAccountToken }} + {{- if .Values.dashboards.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dashboards.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.dashboards.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dashboards.podAffinityPreset "component" "dashboards" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dashboards.podAntiAffinityPreset "component" "dashboards" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.dashboards.nodeAffinityPreset.type "key" .Values.dashboards.nodeAffinityPreset.key "values" .Values.dashboards.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.dashboards.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.dashboards.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dashboards.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.dashboards.priorityClassName }} + priorityClassName: {{ .Values.dashboards.priorityClassName | quote }} + {{- end }} + {{- if .Values.dashboards.schedulerName }} + schedulerName: {{ .Values.dashboards.schedulerName | quote }} + {{- end }} + {{- if .Values.dashboards.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.dashboards.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.dashboards.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dashboards.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.dashboards.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- include "opensearch.dashboards.copy-default-plugins.initContainer" . | nindent 8 }} + {{- if .Values.dashboards.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: {{ include "opensearch.dashboards.fullname" . }} + image: {{ template "opensearch.dashboards.image" . }} + imagePullPolicy: {{ .Values.dashboards.image.pullPolicy }} + {{- if .Values.dashboards.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.dashboards.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.dashboards.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.dashboards.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.dashboards.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.security.enabled }} + {{- if 
.Values.usePasswordFiles }} + - name: OPENSEARCH_DASHBOARDS_PASSWORD_FILE + value: "/opt/drycc/opensearch-dashboards/secrets/opensearch-dashboards-password" + {{- else }} + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.secretName" . }} + key: opensearch-dashboards-password + {{- end }} + {{- end }} + {{- if .Values.dashboards.tls.enabled }} + - name: OPENSEARCH_DASHBOARDS_SERVER_ENABLE_TLS + value: "true" + - name: OPENSEARCH_DASHBOARDS_SERVER_TLS_USE_PEM + value: "true" + {{- end }} + - name: OPENSEARCH_DASHBOARDS_OPENSEARCH_URL + value: {{ (include "opensearch.url" .) }} + {{- if .Values.dashboards.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.dashboards.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.dashboards.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.dashboards.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.dashboards.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.dashboards.resources }} + resources: {{- toYaml .Values.dashboards.resources | nindent 12 }} + {{- else if ne .Values.dashboards.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.dashboards.resourcesPreset) | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.dashboards.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.dashboards.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.dashboards.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dashboards.livenessProbe "enabled") 
"context" $) | nindent 12 }} + tcpSocket: + port: {{ .Values.dashboards.containerPorts.http }} + {{- end }} + {{- if .Values.dashboards.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.dashboards.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dashboards.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: "/" + port: {{ .Values.dashboards.containerPorts.http }} + {{- end }} + {{- if .Values.dashboards.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.dashboards.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dashboards.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: "/" + port: {{ .Values.dashboards.containerPorts.http }} + {{- end }} + {{- end }} + {{- if .Values.dashboards.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch-dashboards/config + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch-dashboards/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch-dashboards/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch-dashboards/plugins + subPath: app-plugins-dir + - name: dashboards-data + mountPath: /drycc/opensearch-dashboards + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-dashboards-secrets + mountPath: /opt/drycc/opensearch-dashboards/secrets + {{- end }} + {{- if 
.Values.security.enabled }} + - name: opensearch-certificates + mountPath: /opt/drycc/opensearch-dashboards/config/certs/opensearch + readOnly: true + {{- end }} + {{- if .Values.dashboards.tls.enabled }} + - name: opensearch-dashboard-certificates + mountPath: /opt/drycc/opensearch-dashboards/config/certs/server + readOnly: true + {{- end }} + {{- if .Values.dashboards.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dashboards.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dashboards.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-dashboards-secrets + secret: + secretName: {{ include "opensearch.secretName" . }} + items: + - key: opensearch-dashboards-password + path: opensearch-dashboards-password + {{- end }} + - name: dashboards-data + {{- if .Values.dashboards.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "opensearch.dashboards.pvc" . }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + secret: + secretName: {{ include "opensearch.node.tlsSecretName" (dict "nodeRole" "master" "context" $) }} + items: + - key: {{ include "opensearch.node.tlsSecretCAKey" (dict "nodeRole" "master" "context" $) }} + path: ca.crt + {{- end }} + {{- if .Values.dashboards.tls.enabled }} + - name: opensearch-dashboard-certificates + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "opensearch.dashboards.tlsSecretName" . 
}} + {{- end }} + {{- if .Values.dashboards.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/hpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/hpa.yaml new file mode 100644 index 00000000..fe1ccbaf --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.dashboards.enabled" .) .Values.dashboards.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "opensearch.dashboards.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "opensearch.dashboards.fullname" . 
}} + minReplicas: {{ .Values.dashboards.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.dashboards.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.dashboards.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.dashboards.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.dashboards.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.dashboards.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress-tls-secret.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress-tls-secret.yaml new file mode 100644 index 00000000..5c7d5aac --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress-tls-secret.yaml @@ -0,0 +1,46 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and .Values.dashboards.enabled .Values.dashboards.ingress.enabled }} +{{- if .Values.dashboards.ingress.secrets }} +{{- range .Values.dashboards.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ template "common.names.namespace" $ }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.dashboards.ingress.tls .Values.dashboards.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.dashboards.ingress.hostname }} +{{- $ca := genCA "opensearch-ca" 365 }} +{{- $cert := genSignedCert 
.Values.dashboards.ingress.hostname nil (list .Values.dashboards.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ template "common.names.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +--- +{{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress.yaml new file mode 100644 index 00000000..0b574ae0 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/ingress.yaml @@ -0,0 +1,58 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dashboards.enabled .Values.dashboards.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ template "opensearch.dashboards.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards + {{- if or .Values.dashboards.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.ingress.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.dashboards.ingress.ingressClassName }} + ingressClassName: {{ .Values.dashboards.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.dashboards.ingress.hostname }} + - http: + paths: + {{- if .Values.dashboards.ingress.extraPaths }} + {{- toYaml .Values.dashboards.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.dashboards.ingress.path }} + pathType: {{ .Values.dashboards.ingress.pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "opensearch.dashboards.servicename" .) 
"servicePort" "http" "context" $) | nindent 14 }} + {{- if ne .Values.dashboards.ingress.hostname "*" }} + host: {{ .Values.dashboards.ingress.hostname }} + {{- end }} + {{- end }} + {{- range .Values.dashboards.ingress.extraHosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + pathType: {{ default "ImplementationSpecific" .pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "opensearch.dashboards.servicename" $) "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.dashboards.ingress.extraRules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dashboards.ingress.extraRules "context" $ ) | nindent 4 }} + {{- end }} + {{- if or (and .Values.dashboards.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.dashboards.ingress.annotations )) .Values.dashboards.ingress.selfSigned)) .Values.dashboards.ingress.extraTls }} + tls: + {{- if and .Values.dashboards.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.dashboards.ingress.annotations )) .Values.dashboards.ingress.selfSigned) }} + - hosts: + - {{ .Values.dashboards.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.dashboards.ingress.hostname }} + {{- end }} + {{- if .Values.dashboards.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/networkpolicy.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/networkpolicy.yaml new file mode 100644 index 00000000..d43027d2 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/networkpolicy.yaml @@ -0,0 +1,93 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dashboards.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "opensearch.dashboards.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: dashboards + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: dashboards + policyTypes: + - Ingress + - Egress + {{- if .Values.dashboards.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.restAPI }} + - port: {{ .Values.service.ports.transport }} + - port: {{ .Values.containerPorts.restAPI }} + - port: {{ .Values.containerPorts.transport }} + - port: {{ .Values.dashboards.service.ports.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.dashboards.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dashboards.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + {{- if eq .Values.dashboards.service.type 
"LoadBalancer" }} + - {} + {{- else }} + - ports: + - port: {{ .Values.dashboards.containerPorts.http }} + {{- if not .Values.dashboards.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- end }} + {{- if .Values.dashboards.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.dashboards.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- if .Values.dashboards.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.dashboards.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.dashboards.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.dashboards.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.dashboards.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dashboards.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pdb.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pdb.yaml new file mode 100644 index 00000000..8d8f76b7 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.dashboards.enabled" .) .Values.dashboards.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "opensearch.dashboards.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.dashboards.pdb.minAvailable }} + minAvailable: {{ .Values.dashboards.pdb.minAvailable }} + {{- end }} + {{- if or .Values.dashboards.pdb.maxUnavailable (not .Values.dashboards.pdb.minAvailable) }} + maxUnavailable: {{ .Values.dashboards.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: dashboards +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pvc.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pvc.yaml new file mode 100644 index 00000000..dfba2806 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/pvc.yaml @@ -0,0 +1,35 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and .Values.dashboards.persistence.enabled (not .Values.dashboards.persistence.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "opensearch.dashboards.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels .Values.dashboards.persistence.labels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards + {{- if or .Values.dashboards.persistence.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.persistence.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.dashboards.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.dashboards.persistence.size | quote }} + {{- if .Values.dashboards.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.persistence.selector "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.dashboards.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.persistence.dataSource "context" $) | nindent 4 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.dashboards.persistence "global" .Values.global) | nindent 2 }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/service.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/service.yaml new file mode 100644 index 00000000..b9501595 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/service.yaml @@ -0,0 +1,54 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.dashboards.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opensearch.dashboards.servicename" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards + {{- if or .Values.dashboards.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.dashboards.service.type }} + {{- if and .Values.dashboards.service.clusterIP (eq .Values.dashboards.service.type "ClusterIP") }} + clusterIP: {{ .Values.dashboards.service.clusterIP }} + {{- end }} + {{- if .Values.dashboards.service.sessionAffinity }} + sessionAffinity: {{ .Values.dashboards.service.sessionAffinity }} + {{- end }} + {{- if .Values.dashboards.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.dashboards.service.type "LoadBalancer") (eq .Values.dashboards.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.dashboards.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.dashboards.service.type "LoadBalancer") (not (empty .Values.dashboards.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.dashboards.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.dashboards.service.type "LoadBalancer") (not (empty .Values.dashboards.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.dashboards.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.dashboards.service.ports.http }} + protocol: TCP + targetPort: http + {{- if and (or (eq .Values.dashboards.service.type "NodePort") (eq .Values.dashboards.service.type 
"LoadBalancer")) (not (empty .Values.dashboards.service.nodePorts.http)) }} + nodePort: {{ .Values.dashboards.service.nodePorts.http }} + {{- else if eq .Values.dashboards.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.dashboards.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.dashboards.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/serviceaccount.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/serviceaccount.yaml new file mode 100644 index 00000000..fdecc1e5 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.dashboards.enabled" .) .Values.dashboards.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opensearch.dashboards.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: dashboards + {{- if or .Values.dashboards.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.dashboards.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/tls-secret.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/tls-secret.yaml new file mode 100644 index 00000000..94144d57 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/tls-secret.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.dashboards.enabled" .) (include "opensearch.dashboards.createTlsSecret" .) }} +{{- $ca := genCA "opensearch-ca" 36500 }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $dashboardsFullname := include "opensearch.dashboards.fullname" . }} +{{- $cert := genSignedCert $dashboardsFullname nil nil 36500 $ca }} +{{- $secretDashboardsName := printf "%s-crt" (include "opensearch.dashboards.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretDashboardsName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretDashboardsName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretDashboardsName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretDashboardsName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/vpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/vpa.yaml new file mode 100644 index 00000000..e933185d --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/dashboards/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.dashboards.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "opensearch.dashboards.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: dashboards + {{- if or .Values.dashboards.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.dashboards.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: opensearch + {{- with .Values.dashboards.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dashboards.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dashboards.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ include "opensearch.dashboards.fullname" . }} + {{- if .Values.dashboards.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.dashboards.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/hpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/hpa.yaml new file mode 100644 index 00000000..0ec1ae27 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.data.enabled" .) 
.Values.data.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "opensearch.data.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "opensearch.data.fullname" . }} + minReplicas: {{ .Values.data.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.data.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.data.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.data.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.data.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.data.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/metrics-svc.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/metrics-svc.yaml new file mode 100644 index 00000000..99d15800 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/metrics-svc.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and ( include "opensearch.data.enabled" . ) .Values.data.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "opensearch.data.fullname" .) 
| trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: data + {{- $defaultAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" (.Values.data.metrics.service.ports.metrics | quote) "prometheus.io/path" "/_prometheus/metrics" }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list $defaultAnnotations .Values.data.metrics.service.annotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +spec: + type: ClusterIP + {{- if .Values.data.metrics.service.clusterIP }} + clusterIP: {{ .Values.data.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + targetPort: rest-api + port: {{ .Values.data.metrics.service.ports.metrics }} + protocol: TCP + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/networkpolicy.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/networkpolicy.yaml new file mode 100644 index 00000000..a030e34f --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/networkpolicy.yaml @@ -0,0 +1,90 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.data.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "opensearch.data.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: data + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: data + policyTypes: + - Ingress + - Egress + {{- if .Values.data.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.restAPI }} + - port: {{ .Values.service.ports.transport }} + - port: {{ .Values.containerPorts.restAPI }} + - port: {{ .Values.containerPorts.transport }} + - port: {{ .Values.dashboards.service.ports.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.data.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.containerPorts.restAPI }} + - port: {{ .Values.containerPorts.transport }} + {{- if not .Values.data.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.data.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.data.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- if .Values.data.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.data.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.data.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.data.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.data.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/pdb.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/pdb.yaml new file mode 100644 index 00000000..77f88ea2 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and ( include "opensearch.data.enabled" . ) .Values.data.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "opensearch.data.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.data.pdb.minAvailable }} + minAvailable: {{ .Values.data.pdb.minAvailable }} + {{- end }} + {{- if or .Values.data.pdb.maxUnavailable (not .Values.data.pdb.minAvailable) }} + maxUnavailable: {{ .Values.data.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: data +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/serviceaccount.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/serviceaccount.yaml new file mode 100644 index 00000000..f3afc48b --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.data.enabled" .) .Values.data.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opensearch.data.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data + {{- if or .Values.data.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.data.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/servicemonitor.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/servicemonitor.yaml new file mode 100644 index 00000000..9a81edd0 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and ( include "opensearch.data.enabled" . ) .Values.data.metrics.enabled .Values.data.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "opensearch.data.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.data.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" (dict "values" (list .Values.data.metrics.serviceMonitor.labels .Values.commonLabels) "context" .) 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: data + {{- if or .Values.data.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.data.metrics.serviceMonitor.annotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.data.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: data + prometheus.io/scrape: "true" + {{- if .Values.data.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/_prometheus/metrics" + {{- if .Values.data.metrics.serviceMonitor.interval }} + interval: {{ .Values.data.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.data.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.data.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.data.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.data.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.data.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.data.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.data.metrics.serviceMonitor.relabelings "context" $) 
| nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/statefulset.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/statefulset.yaml new file mode 100644 index 00000000..4521e629 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/statefulset.yaml @@ -0,0 +1,413 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.data.enabled" .) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "opensearch.data.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data + {{- end }} + {{- if or .Values.data.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.data.autoscaling.hpa.enabled }} + replicas: {{ .Values.data.replicaCount }} + {{- end }} + podManagementPolicy: {{ .Values.data.podManagementPolicy }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: data + serviceName: {{ include "opensearch.data.servicename" . 
}} + {{- if .Values.data.updateStrategy }} + updateStrategy: {{- toYaml .Values.data.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: data + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data + {{- end }} + annotations: + {{- if and (include "opensearch.createTlsSecret" .) (not .Values.security.tls.data.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.data.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "opensearch.data.serviceAccountName" . }} + {{- include "opensearch.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.data.automountServiceAccountToken }} + {{- if .Values.data.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.data.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.data.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAffinityPreset "component" "data" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAntiAffinityPreset "component" "data" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.data.nodeAffinityPreset.type "key" .Values.data.nodeAffinityPreset.key "values" .Values.data.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.data.nodeSelector }} + 
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.schedulerName }} + schedulerName: {{ .Values.data.schedulerName }} + {{- end }} + {{- if .Values.data.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.data.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.priorityClassName }} + priorityClassName: {{ .Values.data.priorityClassName | quote }} + {{- end }} + {{- if .Values.data.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.data.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + {{- include "opensearch.sysctl.initContainer" . | nindent 8}} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }} + - name: volume-permissions + image: {{ include "opensearch.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + mkdir -p /drycc/opensearch/data + chown {{ .Values.data.containerSecurityContext.runAsUser }}:{{ .Values.data.podSecurityContext.fsGroup }} /drycc/opensearch/data + find /drycc/opensearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.data.containerSecurityContext.runAsUser }}:{{ .Values.data.podSecurityContext.fsGroup }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- else if ne .Values.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.volumePermissions.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /drycc/opensearch/data + {{- end }} + {{- include "opensearch.copy-default-plugins.initContainer" (dict "component" "data" "context" $) | nindent 8 }} + {{- if .Values.data.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: opensearch + image: {{ include "opensearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.data.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.data.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.data.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.data.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.data.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.data.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.data.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPENSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: OPENSEARCH_NODE_ROLES + value: {{ prepend .Values.data.extraRoles "data" | join "," | quote }} + - name: OPENSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: OPENSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: OPENSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: OPENSEARCH_CLUSTER_HOSTS + value: {{ include "opensearch.hosts" . 
| quote }} + - name: OPENSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) (ternary .Values.data.autoscaling.hpa.minReplicas .Values.data.replicaCount .Values.data.autoscaling.hpa.enabled) | quote }} + - name: OPENSEARCH_CLUSTER_MASTER_HOSTS + {{- $opensearchMasterFullname := include "opensearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $opensearchMasterFullname $e }} {{ end }} + - name: OPENSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) 2) 1 | quote }} + - name: OPENSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "opensearch.data.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: OPENSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.snapshotRepoPath }} + - name: OPENSEARCH_FS_SNAPSHOT_REPO_PATH + value: {{ .Values.snapshotRepoPath | quote }} + {{- end }} + {{- if .Values.data.heapSize }} + - name: OPENSEARCH_HEAP_SIZE + value: {{ .Values.data.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "opensearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.data.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.data.extraEnvVarsCM .Values.data.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.data.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.data.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.data.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.data.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.data.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customLivenessProbe "context" $) | nindent 12 }} + {{- else if 
.Values.data.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.data.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.data.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /opt/drycc/scripts/opensearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.data.resources }} + resources: {{- toYaml .Values.data.resources | nindent 12 }} + {{- else if ne .Values.data.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.data.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/config + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/plugins + subPath: app-plugins-dir + - name: data + mountPath: /drycc/opensearch/data + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + mountPath: /opt/drycc/opensearch/secrets + {{- end }} + {{- if .Values.config }} + - mountPath: /opt/drycc/opensearch/config/opensearch.yml + name: config + subPath: opensearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/drycc/opensearch/config/my_opensearch.yml + name: config + subPath: my_opensearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + mountPath: 
/opt/drycc/opensearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.snapshots.enabled }} + - mountPath: {{ include "opensearch.snapshots.mountPath" . }} + {{- if .Values.snapshots.persistence.enabled }} + name: snapshots + {{- else }} + name: empty-dir + subPath: app-snapshots-dir + {{- end }} + {{- end }} + {{- if .Values.data.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + projected: + sources: + - secret: + name: {{ include "opensearch.secretName" . }} + {{- if or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret }} + - secret: + name: {{ include "opensearch.tlsPasswordsSecret" . 
}} + {{- end }} + {{- end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "opensearch.node.tlsSecretName" (dict "nodeRole" "data" "context" $) }} + items: {{- include "opensearch.node.tlsSecretItems" (dict "nodeRole" "data" "context" $) | nindent 20 }} + - secret: + name: {{ include "opensearch.admin.tlsSecretName" (dict "context" $) }} + items: {{- include "opensearch.admin.tlsSecretItems" (dict "context" $) | nindent 20 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "opensearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "opensearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "opensearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + {{- if and .Values.snapshots.enabled .Values.snapshots.persistence.enabled }} + - name: snapshots + persistentVolumeClaim: + claimName: {{ include "opensearch.snapshots.fullname" . 
}} + {{- end }} + {{- if .Values.data.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.data.persistence.enabled }} + - name: "data" + emptyDir: {} + {{- else if .Values.data.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.data.persistence.existingClaim }} + {{- else }} + {{- if .Values.data.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.data.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.data.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "data" + {{- if or .Values.data.persistence.annotations .Values.commonAnnotations }} + {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.persistence.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.data.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.data.persistence.size | quote }} + {{- if .Values.data.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.data.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.data.persistence.existingVolume }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.data.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/svc-headless.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/svc-headless.yaml new file mode 100644 index 00000000..27e42ad0 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/svc-headless.yaml @@ -0,0 +1,35 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.data.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opensearch.data.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data + {{- if or .Values.data.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.service.headless.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data + {{- if semverCompare ">=1.31-0" (include "common.capabilities.kubeVersion" .) }} + trafficDistribution: {{ .Values.data.service.headless.trafficDistribution }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/vpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/vpa.yaml new file mode 100644 index 00000000..743b0094 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/data/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.data.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "opensearch.data.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: data + {{- if or .Values.data.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: opensearch + {{- with .Values.data.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.data.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.data.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ include "opensearch.data.fullname" . }} + {{- if .Values.data.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.data.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/extra-list.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/extra-list.yaml new file mode 100644 index 00000000..9570df4a --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/hpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/hpa.yaml new file mode 100644 index 00000000..13610967 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.ingest.enabled" .) .Values.ingest.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "opensearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "opensearch.ingest.fullname" . 
}} + minReplicas: {{ .Values.ingest.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.ingest.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.ingest.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.ingest.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.ingest.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.ingest.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/ingress.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/ingress.yaml new file mode 100644 index 00000000..47bccb09 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/ingress.yaml @@ -0,0 +1,58 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.ingest.enabled" .) .Values.ingest.service.enabled .Values.ingest.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "opensearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if or .Values.ingest.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingest.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingest.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingest.ingress.hostname }} + - http: + paths: + {{- if .Values.ingest.ingress.extraPaths }} + {{- toYaml .Values.ingest.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingest.ingress.path }} + pathType: {{ .Values.ingest.ingress.pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "opensearch.ingest.fullname" .) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- if ne .Values.ingest.ingress.hostname "*" }} + host: {{ .Values.ingest.ingress.hostname }} + {{- end }} + {{- end }} + {{- range .Values.ingest.ingress.extraHosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + pathType: {{ default "ImplementationSpecific" .pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "opensearch.ingest.fullname" $) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingest.ingress.extraRules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.ingress.extraRules "context" $ ) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingest.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingest.ingress.annotations )) .Values.ingest.ingress.selfSigned)) .Values.ingest.ingress.extraTls }} + tls: + {{- if and .Values.ingest.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingest.ingress.annotations )) .Values.ingest.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingest.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingest.ingress.hostname }} + {{- end }} + {{- if .Values.ingest.ingress.extraTls }} + {{- 
include "common.tplvalues.render" (dict "value" .Values.ingest.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/metrics-svc.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/metrics-svc.yaml new file mode 100644 index 00000000..f760fb38 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/metrics-svc.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and ( include "opensearch.ingest.enabled" . ) .Values.ingest.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "opensearch.ingest.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: ingest + {{- $defaultAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" (.Values.ingest.metrics.service.port | quote) "prometheus.io/path" "/_prometheus/metrics" }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list $defaultAnnotations .Values.ingest.metrics.service.annotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +spec: + type: ClusterIP + {{- if .Values.ingest.metrics.service.clusterIP }} + clusterIP: {{ .Values.ingest.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + targetPort: rest-api + port: {{ .Values.ingest.metrics.service.ports.metrics }} + protocol: TCP + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/networkpolicy.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/networkpolicy.yaml new file mode 100644 index 00000000..ab02d666 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/networkpolicy.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingest.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "opensearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: ingest + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.restAPI }} + - port: {{ .Values.service.ports.transport }} + - port: {{ .Values.dashboards.service.ports.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.ingest.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + {{- if eq .Values.ingest.service.type "LoadBalancer" }} + - {} + {{- else }} + - ports: + - port: {{ .Values.service.ports.restAPI }} + - port: {{ .Values.service.ports.transport }} + {{- if not .Values.ingest.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.ingest.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.ingest.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- if .Values.ingest.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.ingest.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.ingest.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.ingest.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingest.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/pdb.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/pdb.yaml new file mode 100644 index 00000000..42355541 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and ( include "opensearch.ingest.enabled" . ) .Values.ingest.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "opensearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingest.pdb.minAvailable }} + minAvailable: {{ .Values.ingest.pdb.minAvailable }} + {{- end }} + {{- if or .Values.ingest.pdb.maxUnavailable (not .Values.ingest.pdb.minAvailable) }} + maxUnavailable: {{ .Values.ingest.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: ingest +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/service.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/service.yaml new file mode 100644 index 00000000..00fb8d8e --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/service.yaml @@ -0,0 +1,60 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.ingest.enabled" .) .Values.ingest.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opensearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if or .Values.ingest.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.ingest.service.type }} + {{- if and .Values.ingest.service.clusterIP (eq .Values.ingest.service.type "ClusterIP") }} + clusterIP: {{ .Values.ingest.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.ingest.service.type "LoadBalancer") (eq .Values.ingest.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.ingest.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.ingest.service.type "LoadBalancer") .Values.ingest.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.ingest.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if (and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }} + {{- end }} + {{- if .Values.ingest.service.sessionAffinity }} + sessionAffinity: {{ .Values.ingest.service.sessionAffinity }} + {{- end }} + {{- if .Values.ingest.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-rest-api + port: {{ .Values.ingest.service.ports.restAPI }} + targetPort: rest-api + {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePorts.restAPI)) }} + nodePort: {{ .Values.ingest.service.nodePorts.restAPI }} + {{- else if eq .Values.ingest.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-transport + port: {{ .Values.ingest.service.ports.transport }} + {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePorts.transport)) }} + nodePort: {{ 
.Values.ingest.service.nodePorts.transport }} + {{- else if eq .Values.ingest.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.ingest.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/serviceaccount.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/serviceaccount.yaml new file mode 100644 index 00000000..5f340a9c --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.ingest.enabled" .) .Values.ingest.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opensearch.ingest.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if or .Values.ingest.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.ingest.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/servicemonitor.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/servicemonitor.yaml new file mode 100644 index 00000000..43cb64bc --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and ( include "opensearch.ingest.enabled" . ) .Values.ingest.metrics.enabled .Values.ingest.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "opensearch.ingest.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.ingest.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" (dict "values" (list .Values.ingest.metrics.serviceMonitor.labels .Values.commonLabels) "context" .) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: ingest + {{- if or .Values.ingest.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.ingest.metrics.serviceMonitor.annotations .Values.commonAnnotations) "context" .) 
}} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.ingest.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: ingest + prometheus.io/scrape: "true" + {{- if .Values.ingest.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/_prometheus/metrics" + {{- if .Values.ingest.metrics.serviceMonitor.interval }} + interval: {{ .Values.ingest.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingest.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingest.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingest.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.ingest.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.ingest.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/statefulset.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/statefulset.yaml new file mode 100644 index 00000000..bd5e5b38 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/statefulset.yaml @@ -0,0 +1,347 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.ingest.enabled" . ) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "opensearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest + {{- end }} + {{- if or .Values.ingest.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.ingest.autoscaling.hpa.enabled }} + replicas: {{ .Values.ingest.replicaCount }} + {{- end }} + podManagementPolicy: {{ .Values.ingest.podManagementPolicy }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: ingest + serviceName: {{ include "opensearch.ingest.servicename" . 
}} + {{- if .Values.ingest.updateStrategy }} + updateStrategy: {{- toYaml .Values.ingest.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: ingest + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest + {{- end }} + annotations: + {{- if and (include "opensearch.createTlsSecret" .) (not .Values.security.tls.ingest.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.ingest.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "opensearch.ingest.serviceAccountName" . }} + {{- include "opensearch.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.ingest.automountServiceAccountToken }} + {{- if .Values.ingest.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAffinityPreset "component" "ingest" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAntiAffinityPreset "component" "ingest" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.ingest.nodeAffinityPreset.type "key" .Values.ingest.nodeAffinityPreset.key "values" .Values.ingest.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- 
if .Values.ingest.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.schedulerName }} + schedulerName: {{ .Values.ingest.schedulerName }} + {{- end }} + {{- if .Values.ingest.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.priorityClassName }} + priorityClassName: {{ .Values.ingest.priorityClassName | quote }} + {{- end }} + {{- if .Values.ingest.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.ingest.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.ingest.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + {{- include "opensearch.sysctl.initContainer" . | nindent 8}} + {{- end }} + {{- include "opensearch.copy-default-plugins.initContainer" (dict "component" "ingest" "context" $) | nindent 8 }} + {{- if .Values.ingest.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: opensearch + image: {{ include "opensearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.ingest.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.ingest.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.ingest.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.ingest.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPENSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: OPENSEARCH_NODE_ROLES + value: {{ prepend .Values.ingest.extraRoles "ingest" | join "," | quote }} + - name: OPENSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: OPENSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: OPENSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: OPENSEARCH_CLUSTER_HOSTS + value: {{ include "opensearch.hosts" . 
| quote }} + - name: OPENSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) (ternary .Values.data.autoscaling.hpa.minReplicas .Values.data.replicaCount .Values.data.autoscaling.hpa.enabled) | quote }} + - name: OPENSEARCH_CLUSTER_MASTER_HOSTS + {{- $opensearchMasterFullname := include "opensearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $opensearchMasterFullname $e }} {{ end }} + - name: OPENSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) 2) 1 | quote }} + - name: OPENSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "opensearch.ingest.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: OPENSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.ingest.heapSize }} + - name: OPENSEARCH_HEAP_SIZE + value: {{ .Values.ingest.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "opensearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.ingest.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.ingest.extraEnvVarsCM .Values.ingest.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.ingest.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.ingest.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.ingest.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.ingest.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.ingest.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customLivenessProbe "context" $) | nindent 12 
}} + {{- else if .Values.ingest.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.ingest.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.ingest.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /opt/drycc/scripts/opensearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.ingest.resources }} + resources: {{- toYaml .Values.ingest.resources | nindent 12 }} + {{- else if ne .Values.ingest.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.ingest.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/config + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/plugins + subPath: app-plugins-dir + - name: data + mountPath: /drycc/opensearch/data + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + mountPath: /opt/drycc/opensearch/secrets + {{- end }} + {{- if .Values.config }} + - mountPath: /opt/drycc/opensearch/config/opensearch.yml + name: config + subPath: opensearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/drycc/opensearch/config/my_opensearch.yml + name: config + subPath: my_opensearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: 
opensearch-certificates + mountPath: /opt/drycc/opensearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.snapshots.enabled }} + - mountPath: {{ include "opensearch.snapshots.mountPath" . }} + {{- if .Values.snapshots.persistence.enabled }} + name: snapshots + {{- else }} + name: empty-dir + subPath: app-snapshots-dir + {{- end }} + {{- end }} + {{- if .Values.ingest.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + - name: "data" + emptyDir: {} + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + projected: + sources: + - secret: + name: {{ include "opensearch.secretName" . 
}} + {{- if or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret }} + - secret: + name: {{ include "opensearch.tlsPasswordsSecret" . }} + {{- end }} + {{- end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "opensearch.node.tlsSecretName" (dict "nodeRole" "ingest" "context" $) }} + items: {{- include "opensearch.node.tlsSecretItems" (dict "nodeRole" "ingest" "context" $) | nindent 20 }} + - secret: + name: {{ include "opensearch.admin.tlsSecretName" (dict "context" $) }} + items: {{- include "opensearch.admin.tlsSecretItems" (dict "context" $) | nindent 20 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "opensearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "opensearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "opensearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + {{- if and .Values.snapshots.enabled .Values.snapshots.persistence.enabled }} + - name: snapshots + persistentVolumeClaim: + claimName: {{ include "opensearch.snapshots.fullname" . 
}} + {{- end }} + {{- if .Values.ingest.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/svc-headless.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/svc-headless.yaml new file mode 100644 index 00000000..879d4eb5 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/svc-headless.yaml @@ -0,0 +1,35 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.ingest.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opensearch.ingest.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if or .Values.ingest.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if semverCompare ">=1.31-0" (include "common.capabilities.kubeVersion" .) }} + trafficDistribution: {{ .Values.ingest.service.headless.trafficDistribution }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/vpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/vpa.yaml new file mode 100644 index 00000000..ffd1382a --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingest/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.ingest.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "opensearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: ingest + {{- if or .Values.ingest.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: opensearch + {{- with .Values.ingest.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingest.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.ingest.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ include "opensearch.ingest.fullname" . }} + {{- if .Values.ingest.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.ingest.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress-tls-secrets.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress-tls-secrets.yaml new file mode 100644 index 00000000..56aba171 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress-tls-secrets.yaml @@ -0,0 +1,87 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ template "common.names.namespace" $ }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "opensearch-ca" 36500 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 36500 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ template "common.names.namespace" . 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +--- +{{- end }} +{{- end }} +{{- if .Values.ingest.ingress.enabled }} +{{- if .Values.ingest.ingress.secrets }} +{{- range .Values.ingest.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ template "common.names.namespace" $ }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingest.ingress.tls .Values.ingest.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.ingest.ingress.hostname }} +{{- $ca := genCA "opensearch-ingest-ca" 36500 }} +{{- $cert := genSignedCert .Values.ingest.ingress.hostname nil (list .Values.ingest.ingress.hostname) 36500 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ template "common.names.namespace" $ }} + labels: {{- include "common.labels.standard" ( dict "customLabels" 
.Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +--- +{{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress.yaml new file mode 100644 index 00000000..bb4a9834 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: opensearch + {{- if or .Values.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + pathType: {{ .Values.ingress.pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- if ne .Values.ingress.hostname "*" }} + host: {{ .Values.ingress.hostname }} + {{- end }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + pathType: {{ default "ImplementationSpecific" .pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraRules "context" $ ) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git 
a/addons/opensearch/3.0/chart/opensearch-3.0/templates/initialization-configmap.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/initialization-configmap.yaml new file mode 100644 index 00000000..03b089d8 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/initialization-configmap.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" ( dict "value" .Values.initScripts "context" $ ) | nindent 4 }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/hpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/hpa.yaml new file mode 100644 index 00000000..ca6a7b70 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/hpa.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.master.enabled" .) .Values.master.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "opensearch.master.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "opensearch.master.fullname" . }} + minReplicas: {{ .Values.master.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.master.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.master.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.master.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.master.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.master.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/metrics-svc.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/metrics-svc.yaml new file mode 100644 index 00000000..7dc899b3 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/metrics-svc.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and ( include "opensearch.master.enabled" . ) .Values.master.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "opensearch.master.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} +  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} +    app.kubernetes.io/part-of: opensearch +    app.kubernetes.io/component: master +  {{- $defaultAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" (.Values.master.metrics.service.ports.metrics | quote) "prometheus.io/path" "/_prometheus/metrics" }} +  {{- $annotations := include "common.tplvalues.merge" (dict "values" (list $defaultAnnotations .Values.master.metrics.service.annotations .Values.commonAnnotations) "context" .) }} +  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +spec: +  type: ClusterIP +  {{- if .Values.master.metrics.service.clusterIP }} +  clusterIP: {{ .Values.master.metrics.service.clusterIP }} +  {{- end }} +  ports: +    - name: http-metrics +      targetPort: rest-api +      port: {{ .Values.master.metrics.service.ports.metrics }} +      protocol: TCP +  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.podLabels .Values.commonLabels ) "context" . ) }} +  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} +    app.kubernetes.io/component: master +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/networkpolicy.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/networkpolicy.yaml new file mode 100644 index 00000000..25b26192 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/networkpolicy.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.master.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "opensearch.master.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: master + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: master + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.service.ports.restAPI }} + - port: {{ .Values.service.ports.transport }} + - port: {{ .Values.dashboards.service.ports.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.master.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + {{- if eq .Values.service.type "LoadBalancer" }} + - {} + {{- else }} + - ports: + - port: {{ .Values.service.ports.restAPI }} + - port: {{ .Values.service.ports.transport }} + {{- if not .Values.master.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.master.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.master.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- if .Values.master.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.master.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.master.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.master.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.master.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/pdb.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/pdb.yaml new file mode 100644 index 00000000..a82b883a --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and ( include "opensearch.master.enabled" . ) .Values.master.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "opensearch.master.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.master.pdb.minAvailable }} + minAvailable: {{ .Values.master.pdb.minAvailable }} + {{- end }} + {{- if or .Values.master.pdb.maxUnavailable (not .Values.master.pdb.minAvailable) }} + maxUnavailable: {{ .Values.master.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: master +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/serviceaccount.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/serviceaccount.yaml new file mode 100644 index 00000000..7024d340 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "opensearch.master.enabled" .) .Values.master.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opensearch.master.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if or .Values.master.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.master.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/servicemonitor.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/servicemonitor.yaml new file mode 100644 index 00000000..992e9b45 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and ( include "opensearch.master.enabled" . ) .Values.master.metrics.enabled .Values.master.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "opensearch.master.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.master.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" (dict "values" (list .Values.master.metrics.serviceMonitor.labels .Values.commonLabels) "context" .) 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: master + {{- if or .Values.master.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.master.metrics.serviceMonitor.annotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.master.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: master + prometheus.io/scrape: "true" + {{- if .Values.master.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/_prometheus/metrics" + {{- if .Values.master.metrics.serviceMonitor.interval }} + interval: {{ .Values.master.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.master.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.master.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.master.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.master.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.master.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.master.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" 
.Values.master.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/statefulset.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/statefulset.yaml new file mode 100644 index 00000000..96ede298 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/statefulset.yaml @@ -0,0 +1,425 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.master.enabled" .) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "opensearch.master.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master + {{- end }} + {{- if or .Values.master.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.master.autoscaling.hpa.enabled }} + replicas: {{ .Values.master.replicaCount }} + {{- end }} + podManagementPolicy: {{ .Values.master.podManagementPolicy }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: master + serviceName: {{ include "opensearch.master.servicename" . }} + {{- if .Values.master.updateStrategy }} + updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: master + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master + {{- end }} + annotations: + {{- if and (include "opensearch.createTlsSecret" .) (not .Values.security.tls.master.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "opensearch.master.serviceAccountName" . }} + {{- include "opensearch.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.master.automountServiceAccountToken }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" 
.Values.master.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + {{- include "opensearch.sysctl.initContainer" . | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }} + - name: volume-permissions + image: {{ include "opensearch.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + mkdir -p /drycc/opensearch/data + chown {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} /drycc/opensearch/data + find /drycc/opensearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- else if ne .Values.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.volumePermissions.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /drycc/opensearch/data + {{- end }} + {{- include "opensearch.copy-default-plugins.initContainer" (dict "component" "master" "context" $) | nindent 8 }} + {{- if .Values.master.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: opensearch + image: {{ include 
"opensearch.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.master.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.master.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.master.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }} + {{- else if .Values.security.enabled }} + command: + - bash + - -ec + - | + if [[ "$MY_POD_NAME" =~ -0$ ]]; then + export OPENSEARCH_SECURITY_BOOTSTRAP=true + # Opensearch securityadmin.sh requires the admin.key to be in PKCS8 format + openssl pkcs8 -topk8 -nocrypt -in "/opt/drycc/opensearch/config/certs/admin.key" > "/opt/drycc/opensearch/config/admin.key" + export OPENSEARCH_SECURITY_ADMIN_KEY_LOCATION="/opt/drycc/opensearch/config/admin.key" + fi + init-stack /opt/drycc/scripts/opensearch/entrypoint.sh /opt/drycc/scripts/opensearch/run.sh + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.master.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPENSEARCH_IS_DEDICATED_NODE + value: {{ ternary "yes" "no" 
.Values.master.masterOnly | quote }} + - name: OPENSEARCH_NODE_ROLES + value: {{ prepend .Values.master.extraRoles "master" | join "," | quote }} + - name: OPENSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: OPENSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: OPENSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: OPENSEARCH_CLUSTER_HOSTS + value: {{ include "opensearch.hosts" . | quote }} + - name: OPENSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) (ternary .Values.data.autoscaling.hpa.minReplicas .Values.data.replicaCount .Values.data.autoscaling.hpa.enabled) | quote }} + - name: OPENSEARCH_CLUSTER_MASTER_HOSTS + {{- $opensearchMasterFullname := include "opensearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $opensearchMasterFullname $e }} {{ end }} + - name: OPENSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.hpa.minReplicas .Values.master.replicaCount .Values.master.autoscaling.hpa.enabled) 2) 1 | quote }} + - name: OPENSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "opensearch.master.servicename" .) | trunc 63 | trimSuffix "-" }}.{{ include "common.names.namespace" . 
}}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: OPENSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.snapshotRepoPath }} + - name: OPENSEARCH_FS_SNAPSHOT_REPO_PATH + value: {{ .Values.snapshotRepoPath | quote }} + {{- end }} + {{- if .Values.master.heapSize }} + - name: OPENSEARCH_HEAP_SIZE + value: {{ .Values.master.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "opensearch.configure.security" . | nindent 12 }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.customStartupProbe }} + startupProbe: {{- include 
"common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.master.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.master.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.master.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - init-stack + - /opt/drycc/scripts/opensearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- else if ne .Values.master.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.master.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/config + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/tmp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/drycc/opensearch/plugins + subPath: app-plugins-dir + - name: data + mountPath: 
/drycc/opensearch/data + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + mountPath: /opt/drycc/opensearch/secrets + {{- end }} + {{- if .Values.config }} + - mountPath: /opt/drycc/opensearch/config/opensearch.yml + name: config + subPath: opensearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/drycc/opensearch/config/my_opensearch.yml + name: config + subPath: my_opensearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + mountPath: /opt/drycc/opensearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.snapshots.enabled }} + - mountPath: {{ include "opensearch.snapshots.mountPath" . 
}} + {{- if .Values.snapshots.persistence.enabled }} + name: snapshots + {{- else }} + name: empty-dir + subPath: app-snapshots-dir + {{- end }} + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if and .Values.usePasswordFiles .Values.security.enabled }} + - name: opensearch-secrets + projected: + sources: + - secret: + name: {{ include "opensearch.secretName" . }} + {{- if or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret }} + - secret: + name: {{ include "opensearch.tlsPasswordsSecret" . 
}} + {{- end }} + {{- end }} + {{- if .Values.security.enabled }} + - name: opensearch-certificates + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "opensearch.node.tlsSecretName" (dict "nodeRole" "master" "context" $) }} + items: {{- include "opensearch.node.tlsSecretItems" (dict "nodeRole" "master" "context" $) | nindent 20 }} + - secret: + name: {{ include "opensearch.admin.tlsSecretName" (dict "context" $) }} + items: {{- include "opensearch.admin.tlsSecretItems" (dict "context" $) | nindent 20 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "opensearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "opensearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "opensearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + {{- if and .Values.snapshots.enabled .Values.snapshots.persistence.enabled }} + - name: snapshots + persistentVolumeClaim: + claimName: {{ include "opensearch.snapshots.fullname" . 
}} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.master.persistence.enabled }} + - name: "data" + emptyDir: {} + {{- else if .Values.master.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.master.persistence.existingClaim }} + {{- else }} + {{- if .Values.master.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.master.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.master.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "data" + {{- if or .Values.master.persistence.annotations .Values.commonAnnotations }} + {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.persistence.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.master.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.master.persistence.existingVolume }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/svc-headless.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/svc-headless.yaml new file mode 100644 index 00000000..eb9798f1 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/svc-headless.yaml @@ -0,0 +1,35 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.master.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opensearch.master.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if or .Values.master.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.service.headless.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if semverCompare ">=1.31-0" (include "common.capabilities.kubeVersion" .) }} + trafficDistribution: {{ .Values.master.service.headless.trafficDistribution }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/vpa.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/vpa.yaml new file mode 100644 index 00000000..94ef22d1 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/master/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.master.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "opensearch.master.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: master + {{- if or .Values.ingest.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingest.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: opensearch + {{- with .Values.master.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ include "opensearch.master.fullname" . }} + {{- if .Values.master.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.master.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/secrets.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/secrets.yaml new file mode 100644 index 00000000..ffeb0074 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/secrets.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.createSecret" . ) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + opensearch-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "opensearch-password" "length" 16 "providedValues" (list "security.adminPassword") "context" $) }} + opensearch-dashboards-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "opensearch-dashboards-password" "length" 16 "providedValues" (list "dashboards.password") "context" $) }} + logstash-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "logstash-password" "length" 16 "providedValues" (list "security.logstashPassword") "context" $) }} +{{- end }} +{{- if (include "opensearch.createTlsPasswordsSecret" . ) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls-pass" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.security.tls.keystorePassword }} + keystore-password: {{ default "" .Values.security.tls.keystorePassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.truststorePassword }} + truststore-password: {{ default "" .Values.security.tls.truststorePassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.keyPassword }} + key-password: {{ default "" .Values.security.tls.keyPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/service.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/service.yaml new file mode 100644 index 00000000..4cc3d057 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/service.yaml @@ -0,0 +1,65 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opensearch.service.name" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if (include "opensearch.coordinating.enabled" .) }} + app.kubernetes.io/component: coordinating-only + {{- else }} + app.kubernetes.io/component: master + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-rest-api + port: {{ include "opensearch.service.ports.restAPI" . 
}} + targetPort: rest-api + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.restAPI)) }} + nodePort: {{ .Values.service.nodePorts.restAPI }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-transport + port: {{ .Values.service.ports.transport }} + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.transport)) }} + nodePort: {{ .Values.service.nodePorts.transport }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if (include "opensearch.coordinating.enabled" .) }} + app.kubernetes.io/component: coordinating-only + {{- else }} + app.kubernetes.io/component: master + {{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/init-job.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/init-job.yaml new file mode 100644 index 00000000..4eab9dc0 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/init-job.yaml @@ -0,0 +1,179 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if .Values.snapshots.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "opensearch.snapshots.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" . 
) | nindent 4 }} + app.kubernetes.io/component: snapshots + {{- $defaultAnnotations := dict "helm.sh/hook" "post-install" "helm.sh/hook-delete-policy" "hook-succeeded" }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonAnnotations $defaultAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" . ) | nindent 4 }} +spec: + template: + spec: + {{- include "opensearch.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: false + initContainers: + - name: wait-for-opensearch + image: {{ template "opensearch.snapshots.image" . }} + imagePullPolicy: {{ .Values.snapshots.image.pullPolicy }} + {{- if .Values.snapshots.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.snapshots.containerSecurityContext "context" .) | nindent 12 }} + {{- end }} + {{- if .Values.snapshots.resources }} + resources: {{- toYaml .Values.snapshots.resources | nindent 12 }} + {{- else if ne .Values.snapshots.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.snapshots.resourcesPreset) | nindent 12 }} + {{- end }} + command: + - bash + args: + - -ec + - | + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + + check_opensearch() { + if wait-for-port --timeout=5 --host="$OPENSEARCH_HOST" --state=inuse $OPENSEARCH_PORT_NUMBER; then + return 0 + else + return 1 + fi + } + + echo "Checking connection to OpenSearch" + if retry_while "check_opensearch"; then + echo "Connected to OpenSearch" + exit 0 + else + echo "Error connecting to OpenSearch" + exit 1 + fi + env: + - name: DRYCC_DEBUG + 
value: {{ ternary "true" "false" (or .Values.snapshots.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: OPENSEARCH_HOST + value: {{ include "opensearch.service.name" . | quote }} + - name: OPENSEARCH_PORT_NUMBER + value: {{ include "opensearch.service.ports.restAPI" . | quote }} + containers: + - args: + - --silent + - --show-error + # Starting with curl 8, a URL is required before the first `--next` + - "file:///dev/null" + {{- range $name, $_ := .Values.snapshots.repositories }} + # Register snapshot repository: + - --next + - -w + - '\n' + - -XPUT + {{- if $.Values.security.enabled }} + - -u + - admin:$(OPENSEARCH_PASSWORD) + {{- if $.Values.security.tls.restEncryption }} + - --cacert + - /certs/ca.crt + {{- end }} + {{- end }} + - -H + - "Content-Type: application/json" + - -d + - "@/run/repo-data/{{ $name }}" + - "$(OPENSEARCH_ORIGIN)/_snapshot/{{ $name }}" + {{- end }} + {{- range $name, $_ := .Values.snapshots.policies }} + # Register snapshot policy: + - --next + - -w + - '\n' + {{- if $.Values.security.enabled }} + - -u + - admin:$(OPENSEARCH_PASSWORD) + {{- if $.Values.security.tls.restEncryption }} + - --cacert + - /certs/ca.crt + {{- end }} + {{- end }} + - -H + - "Content-Type: application/json" + - -d + - "@/run/policy-data/{{ $name }}" + - "$(OPENSEARCH_ORIGIN)/_plugins/_sm/policies/{{ $name }}" + {{- end }} + command: + {{- if .Values.snapshots.command }} + {{- include "common.tplvalues.render" (dict "value" .Values.snapshots.command "context" .) | nindent 12 }} + {{- else }} + - /usr/bin/curl + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.snapshots.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: OPENSEARCH_ORIGIN + value: {{ printf "%s://%s:%d" (ternary "https" "http" (and .Values.security.enabled .Values.security.tls.restEncryption)) (include "opensearch.service.name" .) (include "opensearch.service.ports.restAPI" . 
| int) | quote }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "opensearch.secretName" . }} + key: opensearch-password + image: {{ template "opensearch.snapshots.image" . }} + imagePullPolicy: {{ .Values.snapshots.image.pullPolicy }} + name: shell + {{- if .Values.snapshots.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.snapshots.containerSecurityContext "context" .) | nindent 12 }} + {{- end }} + {{- if .Values.snapshots.resources }} + resources: {{- toYaml .Values.snapshots.resources | nindent 12 }} + {{- else if ne .Values.snapshots.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.snapshots.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: repo-data + mountPath: /run/repo-data/ + - name: policy-data + mountPath: /run/policy-data/ + {{- if .Values.security.enabled }} + - name: opensearch-certificates + mountPath: /certs + readOnly: true + {{- end }} + restartPolicy: Never + {{- if .Values.dashboards.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.dashboards.podSecurityContext "context" .) | nindent 8 }} + {{- end }} + volumes: + - name: policy-data + configMap: + name: {{ include "opensearch.snapshots.policyDataConfigMap" . }} + - name: repo-data + configMap: + name: {{ include "opensearch.snapshots.repoDataConfigMap" . 
}} + {{- if and .Values.security.enabled .Values.security.tls.restEncryption }} + - name: opensearch-certificates + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "opensearch.node.tlsSecretName" (dict "nodeRole" "master" "context" $) }} + items: {{- include "opensearch.node.tlsSecretItems" (dict "nodeRole" "master" "context" $) | nindent 20 }} + {{- end }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/networkpolicy.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/networkpolicy.yaml new file mode 100644 index 00000000..87b2c000 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/networkpolicy.yaml @@ -0,0 +1,50 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.snapshots.enabled }} +--- +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: opensearch + app.kubernetes.io/component: snapshots + name: {{ template "opensearch.snapshots.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} +spec: + podSelector: + matchLabels: + batch.kubernetes.io/job-name: {{ template "opensearch.snapshots.fullname" . 
}} + policyTypes: + - Ingress + - Egress + ingress: [] + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if (include "opensearch.coordinating.enabled" .) }} + app.kubernetes.io/component: coordinating-only + {{- else }} + app.kubernetes.io/component: master + {{- end }} + ports: + - port: {{ include "opensearch.service.ports.restAPI" $ }} +{{- end }} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/policies-cm.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/policies-cm.yaml new file mode 100644 index 00000000..1dbbe8ed --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/policies-cm.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if $.Values.snapshots.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: snapshots + name: {{ include "opensearch.snapshots.policyDataConfigMap" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} +data: + {{- range $name, $policy := $.Values.snapshots.policies }} + {{- $repo := $policy.snapshot_config.repository -}} + {{- if (hasKey $.Values.snapshots.repositories $repo) -}} + {{- $name | nindent 2 }}: | + {{- $policy | mustToPrettyJson | nindent 4 }} + {{- else -}} + {{- fail (printf "Repository `%s` is not defined in .snapshots.repositories" $repo) -}} + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/pvc.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/pvc.yaml new file mode 100644 index 00000000..5c6d4194 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/pvc.yaml @@ -0,0 +1,37 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if and .Values.snapshots.persistence.enabled (not .Values.snapshots.persistence.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "opensearch.snapshots.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels .Values.snapshots.persistence.labels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: snapshots + annotations: + helm.sh/resource-policy: keep + {{- if or .Values.snapshots.persistence.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.snapshots.persistence.annotations .Values.commonAnnotations ) "context" . ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.snapshots.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.snapshots.persistence.size | quote }} + {{- if .Values.snapshots.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.snapshots.persistence.selector "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.snapshots.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.snapshots.persistence.dataSource "context" $) | nindent 4 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.snapshots.persistence "global" .Values.global) | nindent 2 }} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/repos-cm.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/repos-cm.yaml new file mode 100644 index 00000000..e64d3df5 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/snapshots/repos-cm.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{- if $.Values.snapshots.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: snapshots + name: {{ include "opensearch.snapshots.repoDataConfigMap" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} +data: + {{- range $name, $repo := $.Values.snapshots.repositories }} + {{- if (eq $repo.type "fs") -}} + {{- $location := $repo.settings.location -}} + {{- if not (eq $location $.Values.snapshotRepoPath) -}} + {{- fail (printf "Location `%s` for repo `%s` must be allowed via .snapshotRepoPath" $location $name) -}} + {{- end -}} + {{- end -}} + {{ $name | nindent 2 }}: | + {{- $repo | mustToPrettyJson | nindent 4 }} + {{- end -}} +{{- end -}} diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/templates/tls-secret.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/templates/tls-secret.yaml new file mode 100644 index 00000000..15a0712a --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/templates/tls-secret.yaml @@ -0,0 +1,126 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "opensearch.createTlsSecret" .) }} +{{- $ca := genCA "opensearch-ca" 36500 }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $cert := genSignedCert "admin" nil nil 36500 $ca }} +{{- $secretAdminName := printf "%s-admin-crt" (include "common.names.fullname" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretAdminName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + admin.crt: {{ include "common.secrets.lookup" (dict "secret" $secretAdminName "key" "admin.crt" "defaultValue" $cert.Cert "context" $) }} + admin.key: {{ include "common.secrets.lookup" (dict "secret" $secretAdminName "key" "admin.key" "defaultValue" $cert.Key "context" $) }} +{{- if and (include "opensearch.master.enabled" .) 
(not .Values.security.tls.master.existingSecret) }} +{{- $fullname := include "opensearch.master.fullname" . }} +{{- $serviceName := include "opensearch.master.servicename" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- if not (include "opensearch.coordinating.enabled" .) }} +{{- $altNames = append $altNames (include "opensearch.service.name" .) }} +{{- $altNames = append $altNames (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} +{{- end }} +{{- $cert := genSignedCert $fullname nil $altNames 36500 $ca }} +{{- $secretName := printf "%s-crt" (include "opensearch.master.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} +{{- $fullname := include "opensearch.data.fullname" . }} +{{- $serviceName := include "opensearch.data.servicename" . 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $cert := genSignedCert $fullname nil $altNames 36500 $ca }} +{{- $secretName := printf "%s-crt" (include "opensearch.data.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} +{{- $fullname := include "opensearch.coordinating.fullname" . }} +{{- $serviceName := include "opensearch.coordinating.servicename" . }} +{{- $altNames := list (include "opensearch.service.name" .) (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $cert := genSignedCert $fullname nil $altNames 36500 $ca }} +{{- $secretName := printf "%s-crt" (include "opensearch.coordinating.fullname" .) 
}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} +{{- $fullname := include "opensearch.ingest.fullname" . }} +{{- $serviceName := include "opensearch.ingest.servicename" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- if .Values.ingest.service.enabled }} +{{- $altNames = append $altNames (include "opensearch.ingest.fullname" .) }} +{{- $altNames = append $altNames (printf "%s.%s.svc.%s" (include "opensearch.ingest.fullname" .) $releaseNamespace $clusterDomain) }} +{{- end }} +{{- $cert := genSignedCert $fullname nil $altNames 36500 $ca }} +{{- $secretName := printf "%s-crt" (include "opensearch.ingest.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} + diff --git a/addons/opensearch/3.0/chart/opensearch-3.0/values.yaml b/addons/opensearch/3.0/chart/opensearch-3.0/values.yaml new file mode 100644 index 00000000..7479f4d2 --- /dev/null +++ b/addons/opensearch/3.0/chart/opensearch-3.0/values.yaml @@ -0,0 +1,3714 @@ +# Copyright Drycc Community. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "" + storageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param apiVersions Override Kubernetes API versions reported by .Capabilities +## +apiVersions: [] +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param usePasswordFiles Mount credentials as files instead of using environment variables +## +usePasswordFiles: true +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic 
mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section OpenSearch cluster Parameters + +## @param clusterName OpenSearch cluster name +## +clusterName: open +## @param containerPorts.restAPI OpenSearch REST API port +## @param containerPorts.transport OpenSearch Transport port +## +containerPorts: + restAPI: 9200 + transport: 9300 +## @param plugins Comma, semi-colon or space separated list of plugins to install at initialization +## ref: https://github.com/bitnami/containers/tree/main/bitnami/opensearch#environment-variables +## +plugins: "" +## @param snapshotRepoPath File System snapshot repository path +## ref: https://github.com/bitnami/containers/tree/main/bitnami/opensearch#environment-variables +## +snapshotRepoPath: "" +## @param config Override opensearch configuration +## +config: {} +## @param extraConfig Append extra configuration to the opensearch node configuration +## Use this instead of `config` to add more configuration +## See below example: +## extraConfig: +## node: +## store: +## allow_mmap: false +## ref: https://www.open.co/guide/en/opensearch/reference/current/settings.html +## +extraConfig: {} +## @param extraHosts A list of external hosts which are part of this cluster +## Example Use Case: When you have a cluster with nodes spanned across multiple K8s or namespaces +## extraHosts: +## - datacenter2-opensearch-master-hl.namespace2.svc +## - datacenter2-opensearch-data-hl.namespace2.svc +extraHosts: [] +## @param extraVolumes A list of volumes to be added to the pod +## Example Use Case: mount ssl certificates when opensearch has tls enabled +## extraVolumes: +## - name: es-certs +## secret: +## defaultMode: 420 +## secretName: es-certs +extraVolumes: [] +## @param 
extraVolumeMounts A list of volume mounts to be added to the pod +## extraVolumeMounts: +## - name: es-certs +## mountPath: /certs +## readOnly: true +extraVolumeMounts: [] +## @param initScripts Dictionary of init scripts. Evaluated as a template. +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. +## +initScriptsSecret: "" +## @param extraEnvVars Array containing extra env vars to be added to all pods (evaluated as a template) +## For example: +## extraEnvVars: +## - name: MY_ENV_VAR +## value: env_var_value +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsSecret: "" +## @param sidecars Add additional sidecar containers to the all opensearch node pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the all opensearch node pod(s) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## command: ['sh', '-c', 'echo "hello world"'] +## +initContainers: [] +## @param useIstioLabels Use 
this variable to add Istio labels to all pods +## +useIstioLabels: true +## Drycc OpenSearch image +## @param image.registry [default: REGISTRY_NAME] OpenSearch image registry +## @param image.repository [default: REPOSITORY_NAME/opensearch] OpenSearch image repository +## @skip image.tag OpenSearch image tag (immutable tags are recommended) +## @param image.digest OpenSearch image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy OpenSearch image pull policy +## @param image.pullSecrets OpenSearch image pull secrets +## @param image.debug Enable OpenSearch image debug mode +## +image: + registry: registry.drycc.cc + repository: drycc-addons/opensearch + tag: "3.0" + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## X-Pack security parameters +## Note: TLS configuration is required in order to configure password authentication +## +security: + ## @param security.enabled Enable X-Pack Security settings + ## + enabled: true + ## @param security.adminPassword Password for 'admin' user + ## Ref: https://github.com/bitnami/containers/tree/main/bitnami/opensearch#security + ## + adminPassword: "" + ## @param security.logstashPassword Password for Logstash + ## + logstashPassword: "" + ## @param security.existingSecret Name of the existing secret containing the OpenSearch password and + ## + existingSecret: "" + ## FIPS mode + ## @param security.fipsMode Configure opensearch with FIPS 140 compliant mode + ## Ref: https://www.open.co/guide/en/opensearch/reference/current/fips-140-compliance.html + ## + fipsMode: false + ## TLS configuration + ## + tls: + ## @section OpenSearch admin parameters + ## @param security.tls.admin.existingSecret Existing secret containing the certificates for admin + ## @param security.tls.admin.certKey Key containing the crt for admin certificate (defaults to admin.crt) + ## @param security.tls.admin.keyKey Key containing the key for admin certificate (defaults to admin.key) + ## + admin: + existingSecret: "" + certKey: "" + keyKey: "" + ## @param security.tls.restEncryption Enable SSL/TLS encryption for OpenSearch REST API. + ## + restEncryption: true + ## @param security.tls.autoGenerated Create self-signed TLS certificates. + ## NOTE: If autoGenerated certs are enabled and a new node type is enabled using helm upgrade, make sure you remove previously existing OpenSearch TLS secrets. + ## Otherwise, the new node certs won't match the existing certs. 
+ ## + autoGenerated: true + ## @param security.tls.verificationMode Verification mode for SSL communications. + ## Supported values: full, certificate, none. + ## Ref: https://www.open.co/guide/en/opensearch/reference/current/security-settings.html + ## + verificationMode: "none" + ## TLS configuration for master nodes + ## + master: + ## @param security.tls.master.existingSecret Existing secret containing the certificates for the master nodes + ## @param security.tls.master.certKey Key containing the crt for master nodes certificate (defaults to tls.crt) + ## @param security.tls.master.keyKey Key containing the key for master nodes certificate (defaults to tls.key) + ## @param security.tls.master.caKey Key containing the ca for master nodes certificate (defaults to ca.crt) + ## + existingSecret: "" + certKey: "" + keyKey: "" + caKey: "" + ## TLS configuration for data nodes + ## + data: + ## @param security.tls.data.existingSecret Existing secret containing the certificates for the data nodes + ## @param security.tls.data.certKey Key containing the crt for data nodes certificate (defaults to tls.crt) + ## @param security.tls.data.keyKey Key containing the key for data nodes certificate (defaults to tls.key) + ## @param security.tls.data.caKey Key containing the ca for data nodes certificate (defaults to ca.crt) + ## + existingSecret: "" + certKey: "" + keyKey: "" + caKey: "" + ## TLS configuration for ingest nodes + ## + ingest: + ## @param security.tls.ingest.existingSecret Existing secret containing the certificates for the ingest nodes + ## @param security.tls.ingest.certKey Key containing the crt for ingest nodes certificate (defaults to tls.crt) + ## @param security.tls.ingest.keyKey Key containing the key for ingest nodes certificate (defaults to tls.key) + ## @param security.tls.ingest.caKey Key containing the ca for ingest nodes certificate (defaults to ca.crt) + ## + existingSecret: "" + certKey: "" + keyKey: "" + caKey: "" + ## TLS configuration for 
coordinating nodes + ## + coordinating: + ## @param security.tls.coordinating.existingSecret Existing secret containing the certificates for the coordinating nodes + ## @param security.tls.coordinating.certKey Key containing the crt for coordinating nodes certificate (defaults to tls.crt) + ## @param security.tls.coordinating.keyKey Key containing the key for coordinating nodes certificate (defaults to tls.key) + ## @param security.tls.coordinating.caKey Key containing the ca for coordinating nodes certificate (defaults to ca.crt) + ## + existingSecret: "" + certKey: "" + keyKey: "" + caKey: "" + ## @param security.tls.keystoreFilename Name of the keystore file + ## + keystoreFilename: opensearch.keystore.jks + ## @param security.tls.truststoreFilename Name of the truststore + ## + truststoreFilename: opensearch.truststore.jks + ## @param security.tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 + ## Ignored when using autoGenerated certs. + ## + usePemCerts: false + ## @param security.tls.passwordsSecret Existing secret containing the Keystore and Truststore passwords, or key password if PEM certs are used + ## + passwordsSecret: "" + ## @param security.tls.keystorePassword Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. + ## Ignored if security.tls.passwordsSecret is provided. + ## + keystorePassword: "" + ## @param security.tls.truststorePassword Password to access the JKS/PKCS12 truststore when they are password-protected. + ## Ignored if security.tls.passwordsSecret is provided. + ## + truststorePassword: "" + ## @param security.tls.keyPassword Password to access the PEM key when they are password-protected. + ## Ignored if security.tls.passwordsSecret is provided. 
+ ## + keyPassword: "" + ## @param security.tls.secretKeystoreKey Name of the secret key containing the Keystore password + ## + secretKeystoreKey: "" + ## @param security.tls.secretTruststoreKey Name of the secret key containing the Truststore password + ## + secretTruststoreKey: "" + ## @param security.tls.secretKey Name of the secret key containing the PEM key password + ## + secretKey: "" + ## @param security.tls.nodesDN A comma separated list of DN for nodes + ## e.g. nodesDN: "O=Example CA,C=SE,UID=c-5ca04c9328c8208704310f7c2ed16414" + ## + ## + nodesDN: "" + ## @param security.tls.adminDN A comma separated list of DN for admins + ## + adminDN: "" + +## @section Traffic Exposure Parameters + +## OpenSearch service parameters +## +service: + ## @param service.type OpenSearch service type + ## + type: ClusterIP + ## @param service.ports.restAPI OpenSearch service REST API port + ## @param service.ports.transport OpenSearch service transport port + ## + ports: + restAPI: 9200 + transport: 9300 + ## Node ports to expose + ## @param service.nodePorts.restAPI Node port for REST API + ## @param service.nodePorts.transport Node port for REST API + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + restAPI: "" + transport: "" + ## @param service.clusterIP OpenSearch service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP OpenSearch service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges OpenSearch service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy OpenSearch service external traffic policy + ## ref 
http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for OpenSearch service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in OpenSearch service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} +## OpenSearch ingress parameters +## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress record generation for OpenSearch + ## + enabled: false + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.hostname Default host for the ingress record + ## + hostname: opensearch.local + ## @param ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: opensearch.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - opensearch.local + ## secretName: opensearch.local-tls + ## + extraTls: [] + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 36500 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: opensearch.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... 
+ ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + +## @section Master-eligible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-eligible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 1 + + ## @param master.extraRoles Append extra roles to the node role + ## + extraRoles: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param master.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param master.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param master.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `master.pdb.minAvailable` and `master.pdb.maxUnavailable` are empty. 
+ ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param master.nameOverride String to partially override opensearch.master.fullname + ## + nameOverride: "" + ## @param master.fullnameOverride String to fully override opensearch.master.fullname + ## + fullnameOverride: "" + ## @param master.servicenameOverride String to fully override opensearch.master.servicename + ## DEPRECATED: Use master.service.headless.nameOverride instead + ## + servicenameOverride: "" + ## @param master.annotations [object] Annotations for the master statefulset + ## + annotations: {} + ## @param master.updateStrategy.type Master-eligible nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "medium" + ## @param master.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 512m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled master-eligible pods' Security Context + ## @param master.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param master.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param master.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param master.podSecurityContext.fsGroup Set master-eligible pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled containers' Security Context + ## @param master.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param master.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param master.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param 
master.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param master.containerSecurityContext.privileged Set container's Security Context privileged + ## @param master.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param master.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param master.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param master.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param master.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param master.hostAliases master-eligible pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for master-eligible pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for master-eligible pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for master-eligible pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for master-eligible pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for master-eligible pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.priorityClassName master-eligible pods' priorityClassName + ## + priorityClassName: "" + ## @param master.schedulerName Name of the k8s scheduler (other than default) for master-eligible pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.terminationGracePeriodSeconds In seconds, time the given to the OpenSearch Master pod needs to terminate gracefully + ## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param master.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param master.podManagementPolicy podManagementPolicy to manage scaling operation of OpenSearch master pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for OpenSearch master-eligible containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable/disable the startup probe (master nodes pod) + ## @param master.startupProbe.initialDelaySeconds Delay before startup probe is initiated (master nodes pod) + ## @param master.startupProbe.periodSeconds How often to perform the probe (master nodes pod) + ## @param master.startupProbe.timeoutSeconds When the probe times out (master nodes pod) + ## @param master.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) + ## @param master.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable/disable the liveness probe (master-eligible nodes pod) + ## @param master.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (master-eligible nodes pod) + ## 
@param master.livenessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.livenessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable/disable the readiness probe (master-eligible nodes pod) + ## @param master.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (master-eligible nodes pod) + ## @param master.readinessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.readinessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param 
master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.lifecycleHooks for the master-eligible container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraEnvVars Array with extra environment variables to add to master-eligible nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for master-eligible nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for master-eligible nodes + ## + extraEnvVarsSecret: "" + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the master-eligible pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the master-eligible container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the master-eligible pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the master-eligible pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If 
defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.existingClaim Existing Persistent Volume Claim + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param master.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. + ## + existingVolume: "" + ## @param master.persistence.selector Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + ## + selector: {} + ## @param master.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 8Gi + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## Pods Service Account + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param master.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. + ## @param master.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param master.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. + ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: false + annotations: {} + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param master.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param master.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports OpenSearch is + ## listening on. When true, OpenSearch will accept connections from any source (with the correct destination port). + ## + allowExternal: false + ## @param master.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + allowCurrentNamespace: true + allowNamespaces: [] + ## @param master.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param master.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param master.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param master.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + autoscaling: + vpa: + ## @param master.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param master.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param master.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## + controlledResources: [] + ## @param master.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param master.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param master.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param master.autoscaling.hpa.enabled Enable HPA for OpenSearch master-eligible nodes + ## + enabled: false + ## @param master.autoscaling.hpa.minReplicas Minimum number of OpenSearch master-eligible node replicas + ## + minReplicas: 3 + ## @param master.autoscaling.hpa.maxReplicas Maximum number of OpenSearch master-eligible node replicas + ## + maxReplicas: 11 + ## @param master.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param master.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + ## Master-eligible service parameters + ## + service: + ## Headless service properties + ## + headless: + ## @param master.service.headless.annotations Annotations for the Master-eligible headless service. 
+ ## + annotations: {} + ## @param master.service.headless.nameOverride String to fully override opensearch.master.servicename + ## + nameOverride: "" + ## @param master.service.headless.trafficDistribution String Traffic distribution for the master headless service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + ## + trafficDistribution: "PreferClose" + ## Metrics configuration for master-eligible node + ## + metrics: + ## @param master.metrics.enabled Enable master-eligible node metrics + ## + enabled: false + service: + ## @param master.metrics.service.ports.metrics master-eligible node metrics service port + ## + ports: + metrics: 80 + ## @param master.metrics.service.clusterIP master-eligible node metrics service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## master-eligible node metrics service monitor configuration + ## + serviceMonitor: + ## @param master.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param master.metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## e.g: + ## namespace: monitoring + ## + namespace: "" + ## @param master.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## @param master.metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: 30s + ## @param master.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: 10s + ## @param master.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param master.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param master.metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param master.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## Rules for PrometheusRule object if enabled + ## + ## E.g. 
+ ## @param master.metrics.rules.enabled Enable render extra rules for PrometheusRule object + ## @param master.metrics.rules.spec Rules to render into the PrometheusRule object + ## @param master.metrics.rules.selector Selector for the PrometheusRule object + ## @param master.metrics.rules.namespace Namespace where to create the PrometheusRule object + ## @param master.metrics.rules.additionalLabels Additional labels to add to the PrometheusRule object + ## + rules: + enabled: false + ## E.g + ## - alert: ArgoAppMissing + ## expr: | + ## absent(argocd_app_info) + ## for: 15m + ## labels: + ## severity: critical + ## annotations: + ## summary: "[ArgoCD] No reported applications" + ## description: > + ## ArgoCD has not reported any applications data for the past 15 minutes which + ## means that it must be down or not functioning properly. This needs to be + ## resolved for this cloud to continue to maintain state. + ## - alert: ArgoAppNotSynced + ## expr: | + ## argocd_app_info{sync_status!="Synced"} == 1 + ## for: 12h + ## labels: + ## severity: warning + ## annotations: + ## summary: "[{{`{{ $labels.name }}`}}] Application not synchronized" + ## description: > + ## The application [{{`{{ $labels.name }}`}}] has not been synchronized for over + ## 12 hours which means that the state of this cloud has drifted away from the + ## state inside Git. 
+ ## + spec: [] + ## E.g + ## selector: + ## prometheus: kube-prometheus + ## + selector: {} + namespace: monitoring + additionalLabels: {} + +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + + ## @param data.extraRoles Append extra roles to the node role + ## + extraRoles: [] + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param data.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param data.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param data.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `data.pdb.minAvailable` and `data.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param data.nameOverride String to partially override opensearch.data.fullname + ## + nameOverride: "" + ## @param data.fullnameOverride String to fully override opensearch.data.fullname + ## + fullnameOverride: "" + ## @param data.servicenameOverride String to fully override opensearch.data.servicename + ## DEPRECATED: Use data.service.headless.nameOverride instead + ## + servicenameOverride: "" + ## @param data.annotations [object] Annotations for the data statefulset + ## + annotations: {} + ## @param data.updateStrategy.type Data-only nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param data.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if data.resources is set (data.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "medium" + ## @param data.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param data.heapSize OpenSearch data node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 1024m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param data.podSecurityContext.enabled Enabled data pods' Security Context + ## @param data.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param data.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param data.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param data.podSecurityContext.fsGroup Set data pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param data.containerSecurityContext.enabled Enabled containers' Security Context + ## @param data.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param data.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param data.containerSecurityContext.runAsGroup Set containers' Security 
Context runAsGroup + ## @param data.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param data.containerSecurityContext.privileged Set container's Security Context privileged + ## @param data.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param data.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param data.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param data.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param data.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param data.hostAliases data pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param data.podLabels Extra labels for data pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param data.podAnnotations Annotations for data pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param data.podAffinityPreset Pod affinity preset. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param data.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `data.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node data.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param data.nodeAffinityPreset.type Node affinity preset type. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param data.nodeAffinityPreset.key Node label key to match. Ignored if `data.affinity` is set + ## + key: "" + ## @param data.nodeAffinityPreset.values Node label values to match. Ignored if `data.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param data.affinity Affinity for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `data.podAffinityPreset`, `data.podAntiAffinityPreset`, and `data.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param data.nodeSelector Node labels for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param data.tolerations Tolerations for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param data.priorityClassName data pods' priorityClassName + ## + priorityClassName: "" + ## @param data.schedulerName Name of the k8s scheduler (other than default) for data pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param data.terminationGracePeriodSeconds In seconds, time the given to the OpenSearch data pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: 
"" + ## @param data.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param data.podManagementPolicy podManagementPolicy to manage scaling operation of OpenSearch data pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for OpenSearch data containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param data.startupProbe.enabled Enable/disable the startup probe (data nodes pod) + ## @param data.startupProbe.initialDelaySeconds Delay before startup probe is initiated (data nodes pod) + ## @param data.startupProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.startupProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param data.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.livenessProbe.enabled Enable/disable the liveness probe (data nodes pod) + ## @param data.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (data nodes pod) + ## @param data.livenessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.livenessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## 
@param data.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param data.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.readinessProbe.enabled Enable/disable the readiness probe (data nodes pod) + ## @param data.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (data nodes pod) + ## @param data.readinessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.readinessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param data.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param data.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param data.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param data.command Override default container command (useful when using custom images) + ## + command: [] + ## @param data.args Override default container args (useful when using custom images) + ## + args: [] + ## @param data.lifecycleHooks for the data container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param data.extraEnvVars Array with extra environment variables to add 
to data nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param data.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for data nodes + ## + extraEnvVarsCM: "" + ## @param data.extraEnvVarsSecret Name of existing Secret containing extra env vars for data nodes + ## + extraEnvVarsSecret: "" + ## @param data.extraVolumes Optionally specify extra list of additional volumes for the data pod(s) + ## + extraVolumes: [] + ## @param data.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the data container(s) + ## + extraVolumeMounts: [] + ## @param data.sidecars Add additional sidecar containers to the data pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param data.initContainers Add additional init containers to the data pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.existingClaim Existing Persistent Volume Claim + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param data.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set. + ## + existingVolume: "" + ## @param data.persistence.selector Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + ## + selector: {} + ## @param data.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param data.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param data.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param data.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param data.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param data.serviceAccount.name Name of the service account to use. 
If not set and create is true, a name is generated using the fullname template. + ## @param data.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param data.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. + ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: false + annotations: {} + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param data.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param data.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports OpenSearch is + ## listening on. When true, OpenSearch will accept connections from any source (with the correct destination port). + ## + allowExternal: false + ## @param data.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + allowCurrentNamespace: true + allowNamespaces: [] + ## @param data.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param data.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param data.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param data.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + autoscaling: + vpa: + ## @param data.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param data.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param data.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## + controlledResources: [] + ## @param data.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param data.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param data.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param data.autoscaling.hpa.enabled Enable HPA for OpenSearch data nodes + ## + enabled: false + ## @param data.autoscaling.hpa.minReplicas Minimum number of OpenSearch data node replicas + ## + minReplicas: 3 + ## @param data.autoscaling.hpa.maxReplicas Maximum number of OpenSearch data node replicas + ## + maxReplicas: 11 + ## @param data.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param data.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + ## Data service parameters + ## + service: + ## Headless service properties + ## + headless: + ## @param data.service.headless.annotations Annotations for the data headless service. 
+ ## + annotations: {} + ## @param data.service.headless.nameOverride String to fully override opensearch.data.servicename + ## + nameOverride: "" + ## @param data.service.headless.trafficDistribution String Traffic distribution for the data headless service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + ## + trafficDistribution: "PreferClose" + ## Metrics configuration for data node + ## + metrics: + ## @param data.metrics.enabled Enable data node metrics + ## + enabled: false + service: + ## @param data.metrics.service.ports.metrics data node metrics service port + ## + ports: + metrics: 80 + ## @param data.metrics.service.clusterIP data node metrics service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## data node metrics service monitor configuration + ## + serviceMonitor: + ## @param data.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param data.metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## e.g: + ## namespace: monitoring + ## + namespace: "" + ## @param data.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## @param data.metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/data/Documentation/api.md#endpoint + ## + interval: 30s + ## @param data.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/data/Documentation/api.md#endpoint + ## + scrapeTimeout: 10s + ## @param data.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/data/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param data.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/data/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param data.metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param data.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## Rules for PrometheusRule object if enabled + ## + ## E.g. 
+ ## @param data.metrics.rules.enabled Enable render extra rules for PrometheusRule object + ## @param data.metrics.rules.spec Rules to render into the PrometheusRule object + ## @param data.metrics.rules.selector Selector for the PrometheusRule object + ## @param data.metrics.rules.namespace Namespace where to create the PrometheusRule object + ## @param data.metrics.rules.additionalLabels Additional lables to add to the PrometheusRule object + ## + rules: + enabled: false + ## E.g + ## - alert: ArgoAppMissing + ## expr: | + ## absent(argocd_app_info) + ## for: 15m + ## labels: + ## severity: critical + ## annotations: + ## summary: "[ArgoCD] No reported applications" + ## description: > + ## ArgoCD has not reported any applications data for the past 15 minutes which + ## means that it must be down or not functioning properly. This needs to be + ## resolved for this cloud to continue to maintain state. + ## - alert: ArgoAppNotSynced + ## expr: | + ## argocd_app_info{sync_status!="Synced"} == 1 + ## for: 12h + ## labels: + ## severity: warning + ## annotations: + ## summary: "[{{`{{ $labels.name }}`}}] Application not synchronized" + ## description: > + ## The application [{{`{{ $labels.name }}`}} has not been synchronized for over + ## 12 hours which means that the state of this cloud has drifted away from the + ## state inside Git. + ## + spec: [] + ## E.g + ## selector: + ## prometheus: kube-prometheus + ## + selector: {} + namespace: monitoring + additionalLabels: {} + +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + + ## @param coordinating.extraRoles Append extra roles to the node role + ## NOTE: In OpenSearch, all nodes act as coordinators, coordinating-only nodes do not have any other role by default. 
+ ## + extraRoles: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param coordinating.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param coordinating.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param coordinating.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `coordinating.pdb.minAvailable` and `coordinating.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param coordinating.nameOverride String to partially override opensearch.coordinating.fullname + ## + nameOverride: "" + ## @param coordinating.fullnameOverride String to fully override opensearch.coordinating.fullname + ## + fullnameOverride: "" + ## @param coordinating.servicenameOverride String to fully override opensearch.coordinating.servicename + ## DEPRECATED: Use coordinating.service.headless.nameOverride instead + ## + servicenameOverride: "" + ## @param coordinating.annotations [object] Annotations for the coordinating-only statefulset + ## + annotations: {} + ## @param coordinating.updateStrategy.type Coordinating-only nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param coordinating.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if coordinating.resources is set (coordinating.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "medium" + ## @param coordinating.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param coordinating.heapSize OpenSearch coordinating node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 512m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param coordinating.podSecurityContext.enabled Enabled coordinating-only pods' Security Context + ## @param coordinating.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param coordinating.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param coordinating.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param coordinating.podSecurityContext.fsGroup Set coordinating-only pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param coordinating.containerSecurityContext.enabled Enabled containers' Security Context + ## @param coordinating.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param coordinating.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param coordinating.containerSecurityContext.runAsGroup 
Set containers' Security Context runAsGroup + ## @param coordinating.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param coordinating.containerSecurityContext.privileged Set container's Security Context privileged + ## @param coordinating.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param coordinating.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param coordinating.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param coordinating.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param coordinating.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param coordinating.hostAliases coordinating-only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param coordinating.podLabels Extra labels for coordinating-only pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param coordinating.podAnnotations Annotations for coordinating-only pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param coordinating.podAffinityPreset Pod affinity preset. Ignored if `coordinating.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param coordinating.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node coordinating.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param coordinating.nodeAffinityPreset.type Node affinity preset type. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param coordinating.nodeAffinityPreset.key Node label key to match. Ignored if `coordinating.affinity` is set + ## + key: "" + ## @param coordinating.nodeAffinityPreset.values Node label values to match. Ignored if `coordinating.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param coordinating.affinity Affinity for coordinating-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `coordinating.podAffinityPreset`, `coordinating.podAntiAffinityPreset`, and `coordinating.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param coordinating.nodeSelector Node labels for coordinating-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param coordinating.tolerations Tolerations for coordinating-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param coordinating.priorityClassName coordinating-only pods' priorityClassName + ## + priorityClassName: "" + ## @param coordinating.schedulerName Name of the k8s scheduler (other than default) for coordinating-only pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param coordinating.terminationGracePeriodSeconds In seconds, time the given to the OpenSearch coordinating pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param coordinating.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param coordinating.podManagementPolicy podManagementPolicy to manage scaling operation of OpenSearch coordinating pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for OpenSearch coordinating-only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param coordinating.startupProbe.enabled Enable/disable the startup probe (coordinating-only nodes pod) + ## @param coordinating.startupProbe.initialDelaySeconds Delay before startup probe is initiated (coordinating-only nodes pod) + ## @param coordinating.startupProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.startupProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## @param coordinating.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.livenessProbe.enabled Enable/disable the liveness probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param 
coordinating.livenessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.readinessProbe.enabled Enable/disable the readiness probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param coordinating.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param coordinating.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param coordinating.command Override default container command (useful when using custom images) + ## + command: [] + ## @param coordinating.args Override 
default container args (useful when using custom images) + ## + args: [] + ## @param coordinating.lifecycleHooks for the coordinating-only container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param coordinating.extraEnvVars Array with extra environment variables to add to coordinating-only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param coordinating.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for coordinating-only nodes + ## + extraEnvVarsCM: "" + ## @param coordinating.extraEnvVarsSecret Name of existing Secret containing extra env vars for coordinating-only nodes + ## + extraEnvVarsSecret: "" + ## @param coordinating.extraVolumes Optionally specify extra list of additional volumes for the coordinating-only pod(s) + ## + extraVolumes: [] + ## @param coordinating.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the coordinating-only container(s) + ## + extraVolumeMounts: [] + ## @param coordinating.sidecars Add additional sidecar containers to the coordinating-only pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param coordinating.initContainers Add additional init containers to the coordinating-only pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param coordinating.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param coordinating.serviceAccount.name Name of the service account to use. 
If not set and create is true, a name is generated using the fullname template. + ## @param coordinating.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param coordinating.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. + ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: false + annotations: {} + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param coordinating.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param coordinating.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports OpenSearch is + ## listening on. When true, OpenSearch will accept connections from any source (with the correct destination port). + ## + allowExternal: false + ## @param coordinating.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + allowCurrentNamespace: true + allowNamespaces: [] + ## @param coordinating.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param coordinating.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param coordinating.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param coordinating.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + autoscaling: + vpa: + ## @param coordinating.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param coordinating.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param coordinating.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## + controlledResources: [] + ## @param coordinating.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param coordinating.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param coordinating.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param coordinating.autoscaling.hpa.enabled Enable HPA for OpenSearch coordinating-only nodes + ## + enabled: false + ## @param coordinating.autoscaling.hpa.minReplicas Minimum number of OpenSearch coordinating-only replicas + ## + minReplicas: 3 + ## @param coordinating.autoscaling.hpa.maxReplicas Maximum number of OpenSearch coordinating-only replicas + ## + maxReplicas: 11 + ## @param coordinating.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param coordinating.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + ## Coordinating-only service parameters + ## + service: + ## Headless service properties + ## + headless: + ## @param coordinating.service.headless.annotations Annotations for the coordinating-only headless service. 
+ ## + annotations: {} + ## @param coordinating.service.headless.nameOverride String to fully override opensearch.coordinating.servicename + ## + nameOverride: "" + ## @param coordinating.service.headless.trafficDistribution String Traffic distribution for the coordinating headless service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + ## + trafficDistribution: "PreferClose" + ## Metrics configuration for coordinating node + ## + metrics: + ## @param coordinating.metrics.enabled Enable coordinating node metrics + ## + enabled: false + service: + ## @param coordinating.metrics.service.ports.metrics coordinating node metrics service port + ## + ports: + metrics: 80 + ## @param coordinating.metrics.service.clusterIP coordinating node metrics service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## coordinating node metrics service monitor configuration + ## + serviceMonitor: + ## @param coordinating.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param coordinating.metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## e.g: + ## namespace: monitoring + ## + namespace: "" + ## @param coordinating.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## @param coordinating.metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/coordinating/Documentation/api.md#endpoint + ## + interval: 30s + ## @param coordinating.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/coordinating/Documentation/api.md#endpoint + ## + scrapeTimeout: 10s + ## @param coordinating.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/coordinating/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param coordinating.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/coordinating/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param coordinating.metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param coordinating.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## Rules for PrometheusRule object if enabled + ## + ## E.g. 
+ ## @param coordinating.metrics.rules.enabled Enable render extra rules for PrometheusRule object + ## @param coordinating.metrics.rules.spec Rules to render into the PrometheusRule object + ## @param coordinating.metrics.rules.selector Selector for the PrometheusRule object + ## @param coordinating.metrics.rules.namespace Namespace where to create the PrometheusRule object + ## @param coordinating.metrics.rules.additionalLabels Additional lables to add to the PrometheusRule object + ## + rules: + enabled: false + ## E.g + ## - alert: ArgoAppMissing + ## expr: | + ## absent(argocd_app_info) + ## for: 15m + ## labels: + ## severity: critical + ## annotations: + ## summary: "[ArgoCD] No reported applications" + ## description: > + ## ArgoCD has not reported any applications data for the past 15 minutes which + ## means that it must be down or not functioning properly. This needs to be + ## resolved for this cloud to continue to maintain state. + ## - alert: ArgoAppNotSynced + ## expr: | + ## argocd_app_info{sync_status!="Synced"} == 1 + ## for: 12h + ## labels: + ## severity: warning + ## annotations: + ## summary: "[{{`{{ $labels.name }}`}}] Application not synchronized" + ## description: > + ## The application [{{`{{ $labels.name }}`}} has not been synchronized for over + ## 12 hours which means that the state of this cloud has drifted away from the + ## state inside Git. 
+ ## + spec: [] + ## E.g + ## selector: + ## prometheus: kube-prometheus + ## + selector: {} + namespace: monitoring + additionalLabels: {} + +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + + ## @param ingest.extraRoles Append extra roles to the node role + ## + extraRoles: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param ingest.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param ingest.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param ingest.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `ingest.pdb.minAvailable` and `ingest.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param ingest.nameOverride String to partially override opensearch.ingest.fullname + ## + nameOverride: "" + ## @param ingest.fullnameOverride String to fully override opensearch.ingest.fullname + ## + fullnameOverride: "" + ## @param ingest.servicenameOverride String to fully override opensearch.ingest.servicename + ## DEPRECATED: Use ingest.service.headless.nameOverride instead + ## + servicenameOverride: "" + ## @param ingest.annotations [object] Annotations for the ingest statefulset + ## + annotations: {} + ## @param ingest.updateStrategy.type Ingest-only nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param ingest.resourcesPreset Set container resources according to one common preset (allowed values: 
none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if ingest.resources is set (ingest.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "medium" + ## @param ingest.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param ingest.heapSize OpenSearch ingest-only node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 512m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param ingest.podSecurityContext.enabled Enabled ingest-only pods' Security Context + ## @param ingest.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param ingest.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param ingest.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param ingest.podSecurityContext.fsGroup Set ingest-only pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param ingest.containerSecurityContext.enabled Enabled containers' Security Context + ## @param ingest.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param ingest.containerSecurityContext.runAsUser Set containers' Security 
Context runAsUser + ## @param ingest.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param ingest.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param ingest.containerSecurityContext.privileged Set container's Security Context privileged + ## @param ingest.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param ingest.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param ingest.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param ingest.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param ingest.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param ingest.hostAliases ingest-only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param ingest.podLabels Extra labels for ingest-only pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param ingest.podAnnotations Annotations for ingest-only pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param ingest.podAffinityPreset Pod affinity preset. Ignored if `ingest.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param ingest.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node ingest.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param ingest.nodeAffinityPreset.type Node affinity preset type. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param ingest.nodeAffinityPreset.key Node label key to match. Ignored if `ingest.affinity` is set + ## + key: "" + ## @param ingest.nodeAffinityPreset.values Node label values to match. Ignored if `ingest.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param ingest.affinity Affinity for ingest-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `ingest.podAffinityPreset`, `ingest.podAntiAffinityPreset`, and `ingest.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param ingest.nodeSelector Node labels for ingest-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param ingest.tolerations Tolerations for ingest-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param ingest.priorityClassName ingest-only pods' priorityClassName + ## + priorityClassName: "" + ## @param ingest.schedulerName Name of the k8s scheduler (other than default) for ingest-only pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param ingest.terminationGracePeriodSeconds In seconds, time the given to the OpenSearch ingest pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param ingest.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param ingest.podManagementPolicy podManagementPolicy to manage scaling operation of OpenSearch ingest pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for OpenSearch ingest-only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param ingest.startupProbe.enabled Enable/disable the startup probe (ingest-only nodes pod) + ## @param ingest.startupProbe.initialDelaySeconds Delay before startup probe is initiated (ingest-only nodes pod) + ## @param ingest.startupProbe.periodSeconds How often to perform the probe (ingest-only nodes pod) + ## @param ingest.startupProbe.timeoutSeconds When the probe times out (ingest-only nodes pod) + ## @param ingest.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) + ## @param ingest.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.livenessProbe.enabled Enable/disable the liveness probe (ingest-only nodes pod) + ## @param ingest.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (ingest-only nodes pod) + ## @param ingest.livenessProbe.periodSeconds How often to perform the probe (ingest-only nodes pod) + ## @param ingest.livenessProbe.timeoutSeconds When the probe times out (ingest-only nodes pod) + ## @param ingest.livenessProbe.successThreshold 
Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) + ## @param ingest.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 180 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.readinessProbe.enabled Enable/disable the readiness probe (ingest-only nodes pod) + ## @param ingest.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (ingest-only nodes pod) + ## @param ingest.readinessProbe.periodSeconds How often to perform the probe (ingest-only nodes pod) + ## @param ingest.readinessProbe.timeoutSeconds When the probe times out (ingest-only nodes pod) + ## @param ingest.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) + ## @param ingest.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param ingest.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param ingest.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param ingest.command Override default container command (useful when using custom images) + ## + command: [] + ## @param ingest.args Override default container args (useful when using custom images) + ## + args: [] + ## @param ingest.lifecycleHooks for the ingest-only container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param ingest.extraEnvVars Array with 
extra environment variables to add to ingest-only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param ingest.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ingest-only nodes + ## + extraEnvVarsCM: "" + ## @param ingest.extraEnvVarsSecret Name of existing Secret containing extra env vars for ingest-only nodes + ## + extraEnvVarsSecret: "" + ## @param ingest.extraVolumes Optionally specify extra list of additional volumes for the ingest-only pod(s) + ## + extraVolumes: [] + ## @param ingest.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ingest-only container(s) + ## + extraVolumeMounts: [] + ## @param ingest.sidecars Add additional sidecar containers to the ingest-only pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param ingest.initContainers Add additional init containers to the ingest-only pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param ingest.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param ingest.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. + ## @param ingest.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param ingest.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
+ ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: false + annotations: {} + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param ingest.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param ingest.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports OpenSearch is + ## listening on. When true, OpenSearch will accept connections from any source (with the correct destination port). + ## + allowExternal: false + ## @param ingest.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + allowCurrentNamespace: true + allowNamespaces: [] + ## @param ingest.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param ingest.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param ingest.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param ingest.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + autoscaling: + vpa: + ## @param ingest.autoscaling.vpa.enabled Enable VPA + 
## + enabled: false + ## @param ingest.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param ingest.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param ingest.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param ingest.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param ingest.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param ingest.autoscaling.hpa.enabled Enable HPA for OpenSearch ingest nodes + ## + enabled: false + ## @param ingest.autoscaling.hpa.minReplicas Minimum number of OpenSearch ingest node replicas + ## + minReplicas: 3 + ## @param ingest.autoscaling.hpa.maxReplicas Maximum number of OpenSearch ingest node replicas + ## + maxReplicas: 11 + ## @param ingest.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param ingest.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + ## OpenSearch Ingest-only Service + ## Recommended for heavy ingestion, improves performance by sending ingest traffic directly into the ingest nodes. + ## NOTE: Ingest nodes will only accept index requests with an associated pipeline, any other request won't be rerouted. 
+ ## + service: + ## @param ingest.service.enabled Enable Ingest-only service + ## + enabled: false + ## @param ingest.service.type OpenSearch ingest-only service type + ## + type: ClusterIP + ## @param ingest.service.ports.restAPI OpenSearch service REST API port + ## @param ingest.service.ports.transport OpenSearch service transport port + ## + ports: + restAPI: 9200 + transport: 9300 + ## Node ports to expose + ## @param ingest.service.nodePorts.restAPI Node port for REST API + ## @param ingest.service.nodePorts.transport Node port for transport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + restAPI: "" + transport: "" + ## @param ingest.service.clusterIP OpenSearch ingest-only service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param ingest.service.loadBalancerIP OpenSearch ingest-only service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param ingest.service.loadBalancerSourceRanges OpenSearch ingest-only service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param ingest.service.externalTrafficPolicy OpenSearch ingest-only service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param ingest.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param ingest.service.annotations Additional custom annotations for OpenSearch ingest-only service + ## + annotations: {} + ## @param ingest.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", 
consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param ingest.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param ingest.service.headless.annotations Annotations for the ingest headless service. + ## + annotations: {} + ## @param ingest.service.headless.nameOverride String to fully override opensearch.ingest.servicename + ## + nameOverride: "" + ## @param ingest.service.headless.trafficDistribution String Traffic distribution for the ingest headless service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + ## + trafficDistribution: "PreferClose" + ## OpenSearch Ingest-only ingress parameters + ## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/ + ## + ingress: + ## @param ingest.ingress.enabled Enable ingress record generation for OpenSearch + ## + enabled: false + ## @param ingest.ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingest.ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingest.ingress.hostname Default host for the ingress record + ## + hostname: opensearch-ingest.local + ## @param ingest.ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param ingest.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingest.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingest.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingest.ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingest.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: opensearch.local + ## path: / + ## + extraHosts: [] + ## @param ingest.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingest.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - opensearch.local + ## secretName: opensearch.local-tls + ## + extraTls: [] + ## @param ingest.ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: opensearch.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... 
+ ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingest.ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + ## Metrics configuration for ingest node + ## + metrics: + ## @param ingest.metrics.enabled Enable ingest node metrics + ## + enabled: false + service: + ## @param ingest.metrics.service.ports.metrics ingest node metrics service port + ## + ports: + metrics: 80 + ## @param ingest.metrics.service.clusterIP ingest node metrics service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## ingest node metrics service monitor configuration + ## + serviceMonitor: + ## @param ingest.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param ingest.metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## e.g: + ## namespace: monitoring + ## + namespace: "" + ## @param ingest.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## @param ingest.metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/ingest/Documentation/api.md#endpoint + ## + interval: 30s + ## @param ingest.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/ingest/Documentation/api.md#endpoint + ## + scrapeTimeout: 10s + ## @param ingest.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/ingest/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param ingest.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/ingest/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param ingest.metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param ingest.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## Rules for PrometheusRule object if enabled + ## + ## E.g. 
+ ## @param ingest.metrics.rules.enabled Enable render extra rules for PrometheusRule object + ## @param ingest.metrics.rules.spec Rules to render into the PrometheusRule object + ## @param ingest.metrics.rules.selector Selector for the PrometheusRule object + ## @param ingest.metrics.rules.namespace Namespace where to create the PrometheusRule object + ## @param ingest.metrics.rules.additionalLabels Additional labels to add to the PrometheusRule object + ## + rules: + enabled: false + ## E.g + ## - alert: ArgoAppMissing + ## expr: | + ## absent(argocd_app_info) + ## for: 15m + ## labels: + ## severity: critical + ## annotations: + ## summary: "[ArgoCD] No reported applications" + ## description: > + ## ArgoCD has not reported any applications data for the past 15 minutes which + ## means that it must be down or not functioning properly. This needs to be + ## resolved for this cloud to continue to maintain state. + ## - alert: ArgoAppNotSynced + ## expr: | + ## argocd_app_info{sync_status!="Synced"} == 1 + ## for: 12h + ## labels: + ## severity: warning + ## annotations: + ## summary: "[{{`{{ $labels.name }}`}}] Application not synchronized" + ## description: > + ## The application [{{`{{ $labels.name }}`}} has not been synchronized for over + ## 12 hours which means that the state of this cloud has drifted away from the + ## state inside Git. 
+ ## + spec: [] + ## E.g + ## selector: + ## prometheus: kube-prometheus + ## + selector: {} + namespace: monitoring + additionalLabels: {} + +## @section Init Container Parameters + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name + ## @skip volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/base + tag: "bookworm" + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container' resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} +## Kernel settings modifier image +## +sysctlImage: + ## @param sysctlImage.enabled Enable kernel settings modifier image + ## + enabled: false + ## @param sysctlImage.registry [default: REGISTRY_NAME] Kernel settings modifier image registry + ## @param sysctlImage.repository [default: REPOSITORY_NAME/os-shell] Kernel settings modifier image repository + ## @skip sysctlImage.tag Kernel settings modifier image tag + ## @param sysctlImage.digest Kernel settings modifier image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param sysctlImage.pullPolicy Kernel settings modifier image pull policy + ## @param sysctlImage.pullSecrets Kernel settings modifier image pull secrets + ## + registry: docker.io + repository: busybox + tag: latest + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container' resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param sysctlImage.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if sysctlImage.resources is set (sysctlImage.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param sysctlImage.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + +## @section OpenSearch Dashboards Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## Bitnami OpenSearch Dashboards image + ## @param dashboards.image.registry [default: REGISTRY_NAME] OpenSearch Dashboards image registry + ## @param dashboards.image.repository [default: REPOSITORY_NAME/opensearch-dashboards] OpenSearch Dashboards image repository + ## @skip dashboards.image.tag OpenSearch Dashboards image tag (immutable tags are recommended) + ## @param dashboards.image.digest OpenSearch Dashboards image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param dashboards.image.pullPolicy OpenSearch Dashboards image pull policy + ## @param dashboards.image.pullSecrets OpenSearch Dashboards image pull secrets + ## @param dashboards.image.debug Enable OpenSearch Dashboards image debug mode + ## + image: + registry: registry.drycc.cc + repository: drycc-addons/opensearch-dashboards + tag: "3.0" + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## OpenSearch Dashboards service parameters + ## + service: + ## @param dashboards.service.type OpenSearch Dashboards service type + ## + type: ClusterIP + ## @param dashboards.service.ports.http OpenSearch Dashboards service web UI port + ## + ports: + http: 5601 + ## Node ports to expose + ## @param dashboards.service.nodePorts.http Node port for web UI + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + ## @param dashboards.service.clusterIP OpenSearch Dashboards service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param dashboards.service.loadBalancerIP OpenSearch Dashboards service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param dashboards.service.loadBalancerSourceRanges OpenSearch Dashboards service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param dashboards.service.externalTrafficPolicy OpenSearch Dashboards service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param dashboards.service.annotations Additional custom annotations for OpenSearch Dashboards service + ## + annotations: {} + ## @param dashboards.service.extraPorts Extra ports to expose in OpenSearch Dashboards service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param dashboards.service.sessionAffinity Session Affinity for Kubernetes 
service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param dashboards.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param dashboards.service.nameOverride String to fully override opensearch.dashboards.servicename + ## + nameOverride: "" + ## OpenSearch Dashboards ingress parameters + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ + ## + ingress: + ## @param dashboards.ingress.enabled Enable ingress record generation for OpenSearch Dashboards + ## + enabled: false + ## @param dashboards.ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param dashboards.ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param dashboards.ingress.hostname Default host for the ingress record + ## + hostname: opensearch-dashboards.local + ## @param dashboards.ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param dashboards.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param dashboards.ingress.tls Enable TLS configuration for the host defined at `dashboards.ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param dashboards.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param dashboards.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param dashboards.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: opensearch-dashboards.local + ## path: / + ## + extraHosts: [] + ## @param dashboards.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param dashboards.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - opensearch-dashboards.local + ## secretName: opensearch-dashboards.local-tls + ## + extraTls: [] + ## @param dashboards.ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set, and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set, and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: opensearch-dashboards.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... 
+ ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param dashboards.ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + ## @param dashboards.containerPorts.http OpenSearch Dashboards HTTP port + ## + containerPorts: + http: 5601 + ## @param dashboards.password Password for OpenSearch Dashboards + ## + password: "" + ## @param dashboards.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param dashboards.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param dashboards.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param dashboards.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `dashboards.pdb.minAvailable` and `dashboards.pdb.maxUnavailable` are empty. 
+ ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param dashboards.nameOverride String to partially override opensearch.dashboards.fullname + ## + nameOverride: "" + ## @param dashboards.fullnameOverride String to fully override opensearch.dashboards.fullname + ## + fullnameOverride: "" + ## @param dashboards.servicenameOverride String to fully override opensearch.dashboards.servicename + ## DEPRECATED: Use dashboards.service.nameOverride instead + ## + servicenameOverride: "" + ## @param dashboards.updateStrategy.type Data-only nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param dashboards.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if dashboards.resources is set (dashboards.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param dashboards.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param dashboards.heapSize OpenSearch data node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. 
+ ## Example: + ## heapSize: 128m + ## + heapSize: 1024m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param dashboards.podSecurityContext.enabled Enabled data pods' Security Context + ## @param dashboards.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param dashboards.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param dashboards.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param dashboards.podSecurityContext.fsGroup Set dashboards pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param dashboards.containerSecurityContext.enabled Enabled containers' Security Context + ## @param dashboards.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param dashboards.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param dashboards.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param dashboards.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param dashboards.containerSecurityContext.privileged Set container's Security Context privileged + ## @param dashboards.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param dashboards.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param dashboards.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param 
dashboards.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param dashboards.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param dashboards.hostAliases data pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param dashboards.podLabels Extra labels for data pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param dashboards.podAnnotations Annotations for data pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param dashboards.podAffinityPreset Pod affinity preset. Ignored if `dashboards.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param dashboards.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `dashboards.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node dashboards.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param dashboards.nodeAffinityPreset.type Node affinity preset type. Ignored if `dashboards.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param dashboards.nodeAffinityPreset.key Node label key to match. Ignored if `dashboards.affinity` is set + ## + key: "" + ## @param dashboards.nodeAffinityPreset.values Node label values to match. Ignored if `dashboards.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param dashboards.affinity Affinity for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `dashboards.podAffinityPreset`, `dashboards.podAntiAffinityPreset`, and `dashboards.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param dashboards.nodeSelector Node labels for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param dashboards.tolerations Tolerations for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param dashboards.priorityClassName data pods' priorityClassName + ## + priorityClassName: "" + ## @param dashboards.schedulerName Name of the k8s scheduler (other than default) for data pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param dashboards.terminationGracePeriodSeconds In seconds, time given to the OpenSearch Dashboards pod to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param dashboards.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Configure extra options for OpenSearch data containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param dashboards.startupProbe.enabled Enable/disable the startup probe (data nodes pod) + ## @param dashboards.startupProbe.initialDelaySeconds Delay before startup probe is initiated (data nodes pod) + ## @param dashboards.startupProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param dashboards.startupProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param dashboards.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param dashboards.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param dashboards.livenessProbe.enabled Enable/disable the liveness probe (data nodes pod) + ## @param dashboards.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (data nodes pod) + ## @param dashboards.livenessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param dashboards.livenessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param dashboards.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param dashboards.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: 
true + initialDelaySeconds: 180 + periodSeconds: 20 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 8 + ## @param dashboards.readinessProbe.enabled Enable/disable the readiness probe (data nodes pod) + ## @param dashboards.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (data nodes pod) + ## @param dashboards.readinessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param dashboards.readinessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param dashboards.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param dashboards.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param dashboards.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param dashboards.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param dashboards.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param dashboards.command Override default container command (useful when using custom images) + ## + command: [] + ## @param dashboards.args Override default container args (useful when using custom images) + ## + args: [] + ## @param dashboards.lifecycleHooks for the data container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param dashboards.extraEnvVars Array with extra environment variables to add to data nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param dashboards.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for data nodes + ## + extraEnvVarsCM: "" + ## @param 
dashboards.extraEnvVarsSecret Name of existing Secret containing extra env vars for data nodes + ## + extraEnvVarsSecret: "" + ## @param dashboards.extraVolumes Optionally specify extra list of additional volumes for the data pod(s) + ## + extraVolumes: [] + ## @param dashboards.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the data container(s) + ## + extraVolumeMounts: [] + ## @param dashboards.sidecars Add additional sidecar containers to the data pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param dashboards.initContainers Add additional init containers to the data pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param dashboards.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param dashboards.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. + ## @param dashboards.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param dashboards.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
+ ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: false + annotations: {} + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param dashboards.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param dashboards.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports OpenSearch is + ## listening on. When true, OpenSearch will accept connections from any source (with the correct destination port). + ## + allowExternal: false + ## @param dashboards.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + allowCurrentNamespace: true + allowNamespaces: [] + ## @param dashboards.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param dashboards.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param dashboards.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param dashboards.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + autoscaling: + vpa: + ## @param 
dashboards.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param dashboards.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param dashboards.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param dashboards.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param dashboards.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param dashboards.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param dashboards.autoscaling.hpa.enabled Enable HPA for OpenSearch Dashboards + ## + enabled: false + ## @param dashboards.autoscaling.hpa.minReplicas Minimum number of OpenSearch Dashboards replicas + ## + minReplicas: 3 + ## @param dashboards.autoscaling.hpa.maxReplicas Maximum number of OpenSearch Dashboards replicas + ## + maxReplicas: 11 + ## @param dashboards.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param dashboards.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + ## TLS configuration + ## + tls: + ## @param dashboards.tls.enabled Enable TLS for OpenSearch Dashboards webserver + ## + enabled: false + ## @param dashboards.tls.existingSecret Existing secret containing the certificates for OpenSearch Dashboards webserver + ## + existingSecret: "" + ## @param dashboards.tls.autoGenerated Create self-signed TLS certificates. 
+ ## NOTE: If autoGenerated certs are enabled and a new node type is enabled using helm upgrade, make sure you remove previously existing TLS secrets. + ## Otherwise, the new node certs won't match the existing certs. + ## + autoGenerated: true + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param dashboards.persistence.enabled Enable persistence using Persistent Volume Claims + ## + enabled: false + ## @param dashboards.persistence.mountPath Path to mount the volume at. + ## + mountPath: /drycc/opensearch-dashboards + ## @param dashboards.persistence.subPath The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param dashboards.persistence.storageClass Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param dashboards.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param dashboards.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param dashboards.persistence.size Size of data volume + ## + size: 8Gi + ## @param dashboards.persistence.existingClaim The name of an existing PVC to use for persistence + ## + existingClaim: "" + ## @param dashboards.persistence.selector Selector to match an existing Persistent Volume for OpenSearch data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. 
+ ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param dashboards.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section OpenSearch Snapshots Parameters + +snapshots: + ## @param snapshots.enabled Enable automatic setup of repositories and snapshot policies + ## + enabled: false + ## @param snapshots.command Override default container command (useful when using custom images) + ## + command: [] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param snapshots.containerSecurityContext.enabled Enabled containers' Security Context + ## @param snapshots.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param snapshots.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param snapshots.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param snapshots.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param snapshots.containerSecurityContext.privileged Set container's Security Context privileged + ## @param snapshots.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param snapshots.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param snapshots.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param snapshots.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## 
@param snapshots.fullnameOverride String to fully override opensearch.snapshots.fullname + ## + fullnameOverride: "" + ## Bitnami OpenSearch Snapshots image + ## @param snapshots.image.registry [default: REGISTRY_NAME] OpenSearch Snapshots image registry + ## @param snapshots.image.repository [default: REPOSITORY_NAME/os-shell] OpenSearch Snapshots image repository + ## @skip snapshots.image.tag OpenSearch Snapshots image tag (immutable tags are recommended) + ## @param snapshots.image.digest OpenSearch Snapshots image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param snapshots.image.pullPolicy OpenSearch Snapshots image pull policy + ## @param snapshots.image.pullSecrets OpenSearch Snapshots image pull secrets + ## @param snapshots.image.debug Enable OpenSearch Snapshots image debug mode + ## + image: + registry: registry.drycc.cc + repository: drycc-addons/opensearch + tag: "3.0" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param snapshots.nameOverride String to partially override common.names.fullname + ## + nameOverride: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param snapshots.persistence.enabled Enable persistence using Persistent Volume Claims + ## + enabled: false + ## @param snapshots.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteMany + ## @param snapshots.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param snapshots.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param snapshots.persistence.existingClaim The name of an existing PVC to use for persistence + ## + existingClaim: "" + ## @param snapshots.persistence.labels Extra labels for the Persistent Volume Claim + ## + labels: {} + ## @param snapshots.persistence.selector Selector to match an existing Persistent Volume for OpenSearch data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param snapshots.persistence.size Size of data volume + ## + size: 8Gi + ## @param snapshots.persistence.storageClass Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param snapshots.persistence.subPath The subdirectory of the volume to mount to, useful in dev environments and one PV for multiple services + ## + subPath: "" + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param snapshots.podSecurityContext.enabled Enabled data pods' Security Context + ## @param snapshots.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param snapshots.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param snapshots.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param snapshots.podSecurityContext.fsGroup Set snapshots pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## @param snapshots.policies [default: SNAPSHOT_POLICY_API_BODY] Each object represents a snapshot policy in YAML form, which will be converted to JSON and then passed as the HTTP body data to the OpenSearch REST API. + ## NOTE: The field `snapshot_config.repository` in each policy must match a repo name in `snapshots.repositories`. + ## + policies: + default: + creation: + schedule: + cron: + expression: "0 20 * * *" + timezone: UTC + deletion: + condition: + max_age: 7d + min_count: 1 + schedule: + cron: + expression: "0 20 * * *" + timezone: UTC + description: Default snapshot policy + enabled: true + snapshot_config: + repository: default + ## @param snapshots.repositories [default: SNAPSHOT_REPO_API_BODY] Each object represents a snapshot repository in YAML form, which will be converted to JSON and then passed as the HTTP body data to the OpenSearch REST API. 
+ ## NOTE: The field `settings.location` in each repo must match the snapshot repo path configured in `snapshotRepoPath`, if its `type` is `fs`. + ## + repositories: + default: + settings: + location: /snapshots + type: fs + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param snapshots.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if snapshots.resources is set (snapshots.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param snapshots.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 100m + ## memory: 10Mi + ## limits: + ## cpu: 200m + ## memory: 20Mi + ## + resources: {} diff --git a/addons/opensearch/3.0/meta.yaml b/addons/opensearch/3.0/meta.yaml new file mode 100644 index 00000000..22ff92e1 --- /dev/null +++ b/addons/opensearch/3.0/meta.yaml @@ -0,0 +1,66 @@ +name: opensearch-3.0 +version: "3.0" +id: 44553de0-804c-4bed-8057-b72dbc0a02a9 +description: "opensearch-3.0." 
+displayName: "opensearch-3.0" +metadata: + displayName: "opensearch-3.0" + provider: + name: drycc + supportURL: https://opensearch.org/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/opensearch +tags: opensearch +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "extraConfig" + required: false + description: "extraConfig config for values.yaml" +- name: "plugins" + required: false + description: "plugins config for values.yaml" +- name: "config" + required: false + description: "opensearch configuration for values.yaml" +- name: "master.nodeSelector" + required: false + description: "master nodeSelector config for values.yaml" +- name: "master.networkPolicy.allowNamespaces" + required: false + description: "master networkPolicy allowNamespaces config for values.yaml" +- name: "ingest.nodeSelector" + required: false + description: "ingest nodeSelector config for values.yaml" +- name: "ingest.networkPolicy.allowNamespaces" + required: false + description: "ingest networkPolicy allowNamespaces config for values.yaml" +- name: "data.nodeSelector" + required: false + description: "data nodeSelector config for values.yaml" +- name: "data.networkPolicy.allowNamespaces" + required: false + description: "data networkPolicy allowNamespaces config for values.yaml" +- name: "coordinating.nodeSelector" + required: false + description: "coordinating nodeSelector config for values.yaml" +- name: "coordinating.networkPolicy.allowNamespaces" + required: false + description: "coordinating networkPolicy allowNamespaces config for values.yaml" +- name: "dashboards.nodeSelector" + required: false + description: "dashboards nodeSelector config for values.yaml" +- name: "dashboards.networkPolicy.allowNamespaces" + required: false + description: "dashboards networkPolicy allowNamespaces config for values.yaml" +- name: "snapshots.nodeSelector" + required: false + 
description: "snapshots nodeSelector config for values.yaml" +- name: "service.type" + required: false + description: "service type config for values.yaml" +- name: "dashboards.service.type" + required: false + description: "dashboards service type config for values.yaml" +archive: false diff --git a/addons/opensearch/2.10/plans/standard-1c2g16/bind.yaml b/addons/opensearch/3.0/plans/standard-2c4g32/bind.yaml similarity index 100% rename from addons/opensearch/2.10/plans/standard-1c2g16/bind.yaml rename to addons/opensearch/3.0/plans/standard-2c4g32/bind.yaml diff --git a/addons/opensearch/2.10/plans/standard-1c2g16/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-2c4g32/create-instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-1c2g16/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-2c4g32/create-instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml b/addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml new file mode 100644 index 00000000..986bed1f --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g32" +id: 3b3addda-c4fc-4a59-bbf5-0f640920e09f +description: "Opensearch standard-2c4g32 plan which limit resources 2 cores 4Gi memory and persistence size 32Gi." 
+displayName: "standard-2c4g32" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/2.10/plans/standard-1c2g16/values.yaml b/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml similarity index 97% rename from addons/opensearch/2.10/plans/standard-1c2g16/values.yaml rename to addons/opensearch/3.0/plans/standard-2c4g32/values.yaml index 9172ea49..157ca24b 100644 --- a/addons/opensearch/2.10/plans/standard-1c2g16/values.yaml +++ b/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: "hb-opensearch-standard-1c2g16" +fullnameOverride: "hb-opensearch-standard-2c4g32" ## @section Master-elegible nodes parameters master: @@ -21,18 +21,18 @@ master: ## resources: limits: + cpu: 2 + memory: 4Gi + requests: cpu: 1 memory: 2Gi - requests: - cpu: 500m - memory: 1Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. 
## Example: ## heapSize: 128m ## - heapSize: 512m + heapSize: 1024m ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -54,7 +54,7 @@ master: - ReadWriteOnce ## @param master.persistence.size Persistent Volume Size ## - size: 16Gi + size: 32Gi ## @section Data-only nodes parameters data: @@ -94,7 +94,7 @@ data: - ReadWriteOnce ## @param data.persistence.size Persistent Volume Size ## - size: 16Gi + size: 8Gi ## @section Coordinating-only nodes parameters coordinating: @@ -151,7 +151,7 @@ dashboards: resources: limits: memory: 2048Mi - cpu: 250m + cpu: 500m requests: memory: 1024Mi - cpu: 125m + cpu: 250m diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/bind.yaml b/addons/opensearch/3.0/plans/standard-2c4g64/bind.yaml new file mode 100644 index 00000000..8d61f02b --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-2c4g64/bind.yaml @@ -0,0 +1,123 @@ +credential: +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DOMAIN + value: {{ include "opensearch.service.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_TCP_REST_API_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rest-api")].port }' + + - name: OPENSEARCH_TCP_TRANSPORT_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="tcp-transport")].port }' + + {{- if .Values.dashboards.enabled }} + {{ if (eq .Values.dashboards.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_DASHBOARDS_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} + + + {{- if .Values.security.enabled }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-password }' + + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-dashboards-password }' + + - name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.logstash-password }' + {{- end }} + + + {{- if (include "opensearch.createTlsSecret" .) }} + {{ if not (include "opensearch.coordinating.enabled" .) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_MASTER_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.master.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} + {{- $serviceName := include "opensearch.data.servicename" . 
}} + - name: OPENSEARCH_DATA_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_DATA_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.data.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_COORDINATING_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.coordinating.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} + {{- $serviceName := include "opensearch.ingest.servicename" . }} + - name: OPENSEARCH_INGEST_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_INGEST_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.ingest.fullname" .) 
}} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + {{- end }} diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-2c4g64/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-2c4g64/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml b/addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml new file mode 100644 index 00000000..88554185 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g64" +id: cd949799-61e9-454e-a9df-26299922be5a +description: "Opensearch standard-2c4g64 plan which limit resources 2 cores 4Gi memory and persistence size 64Gi." +displayName: "standard-2c4g64" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml b/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml new file mode 100644 index 00000000..10a0106f --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml @@ -0,0 +1,157 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-opensearch-standard-2c4g64" +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. 
+ ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 3 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resources.limits The resources limits for opensearch containers + ## @param master.resources.requests The requested resources for opensearch containers + ## + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 4096m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 64Gi +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section OpenSearch Dashborads Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## @param dashboards.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dashboards.resources.limits The resources limits for the data containers + ## @param dashboards.resources.requests [object] The 
requested resources for the data containers + ## + resources: + limits: + memory: 2048Mi + cpu: 500m + requests: + memory: 1024Mi + cpu: 250m diff --git a/addons/opensearch/3.0/plans/standard-4c16g256/bind.yaml b/addons/opensearch/3.0/plans/standard-4c16g256/bind.yaml new file mode 100644 index 00000000..8d61f02b --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g256/bind.yaml @@ -0,0 +1,123 @@ +credential: +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DOMAIN + value: {{ include "opensearch.service.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_TCP_REST_API_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rest-api")].port }' + + - name: OPENSEARCH_TCP_TRANSPORT_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-transport")].port }' + + {{- if .Values.dashboards.enabled }} + {{ if (eq .Values.dashboards.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_DASHBOARDS_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} + + + {{- if .Values.security.enabled }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-password }' + + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-dashboards-password }' + + - name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.logstash-password }' + {{- end }} + + + {{- if (include "opensearch.createTlsSecret" .) }} + {{ if not (include "opensearch.coordinating.enabled" .) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_MASTER_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.master.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} + {{- $serviceName := include "opensearch.data.servicename" . }} + - name: OPENSEARCH_DATA_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_DATA_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.data.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_COORDINATING_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.coordinating.fullname" .) 
}} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} + {{- $serviceName := include "opensearch.ingest.servicename" . }} + - name: OPENSEARCH_INGEST_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_INGEST_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.ingest.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + {{- end }} diff --git a/addons/opensearch/3.0/plans/standard-4c16g256/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-4c16g256/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g256/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-4c16g256/meta.yaml b/addons/opensearch/3.0/plans/standard-4c16g256/meta.yaml new file mode 100644 index 00000000..9dfdc711 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g256/meta.yaml @@ -0,0 +1,7 @@ +name: "standard-4c16g256" +id: 459e2bd5-0a37-4b4c-a371-634157f822f9 +description: "Opensearch standard-4c16g256 plan which limit resources 4 cores 16Gi memory and persistence size 256Gi." 
+displayName: "standard-4c16g256" +bindable: true +maximum_polling_duration: 1800 + \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-4c16g256/values.yaml b/addons/opensearch/3.0/plans/standard-4c16g256/values.yaml new file mode 100644 index 00000000..0b15b099 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g256/values.yaml @@ -0,0 +1,157 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-opensearch-standard-4c16g256" +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 3 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resources.limits The resources limits for opensearch containers + ## @param master.resources.requests The requested resources for opensearch containers + ## + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 2 + memory: 8Gi + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. 
+ ## Example: + ## heapSize: 128m + ## + heapSize: 4096m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 256Gi +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section OpenSearch Dashborads Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## @param dashboards.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dashboards.resources.limits The resources limits for the data containers + ## @param dashboards.resources.requests [object] The 
requested resources for the data containers + ## + resources: + limits: + memory: 2048Mi + cpu: 1000m + requests: + memory: 1024Mi + cpu: 500m diff --git a/addons/opensearch/3.0/plans/standard-4c16g512/bind.yaml b/addons/opensearch/3.0/plans/standard-4c16g512/bind.yaml new file mode 100644 index 00000000..8d61f02b --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g512/bind.yaml @@ -0,0 +1,123 @@ +credential: +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DOMAIN + value: {{ include "opensearch.service.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_TCP_REST_API_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rest-api")].port }' + + - name: OPENSEARCH_TCP_TRANSPORT_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-transport")].port }' + + {{- if .Values.dashboards.enabled }} + {{ if (eq .Values.dashboards.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_DASHBOARDS_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} + + + {{- if .Values.security.enabled }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-password }' + + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-dashboards-password }' + + - name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.logstash-password }' + {{- end }} + + + {{- if (include "opensearch.createTlsSecret" .) }} + {{ if not (include "opensearch.coordinating.enabled" .) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_MASTER_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.master.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} + {{- $serviceName := include "opensearch.data.servicename" . }} + - name: OPENSEARCH_DATA_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_DATA_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.data.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_COORDINATING_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.coordinating.fullname" .) 
}} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} + {{- $serviceName := include "opensearch.ingest.servicename" . }} + - name: OPENSEARCH_INGEST_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_INGEST_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.ingest.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + {{- end }} diff --git a/addons/opensearch/3.0/plans/standard-4c16g512/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-4c16g512/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g512/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-4c16g512/meta.yaml b/addons/opensearch/3.0/plans/standard-4c16g512/meta.yaml new file mode 100644 index 00000000..166dc6fe --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g512/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c16g512" +id: 6f0d84cd-a398-4836-af5a-dc383dc38a0c +description: "Opensearch standard-4c16g512 plan which limit resources 4 cores 16Gi memory and persistence size 512Gi." 
+displayName: "standard-4c16g512" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-4c16g512/values.yaml b/addons/opensearch/3.0/plans/standard-4c16g512/values.yaml new file mode 100644 index 00000000..8abebf0c --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c16g512/values.yaml @@ -0,0 +1,157 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-opensearch-standard-4c16g512" +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 3 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resources.limits The resources limits for opensearch containers + ## @param master.resources.requests The requested resources for opensearch containers + ## + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 2 + memory: 8Gi + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. 
+ ## Example: + ## heapSize: 128m + ## + heapSize: 8192m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 512Gi +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section OpenSearch Dashborads Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## @param dashboards.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dashboards.resources.limits The resources limits for the data containers + ## @param dashboards.resources.requests [object] The 
requested resources for the data containers + ## + resources: + limits: + memory: 3072Mi + cpu: 1500m + requests: + memory: 1024Mi + cpu: 150m diff --git a/addons/opensearch/3.0/plans/standard-4c8g128/bind.yaml b/addons/opensearch/3.0/plans/standard-4c8g128/bind.yaml new file mode 100644 index 00000000..8d61f02b --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c8g128/bind.yaml @@ -0,0 +1,123 @@ +credential: +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DOMAIN + value: {{ include "opensearch.service.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_TCP_REST_API_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rest-api")].port }' + + - name: OPENSEARCH_TCP_TRANSPORT_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-transport")].port }' + + {{- if .Values.dashboards.enabled }} + {{ if (eq .Values.dashboards.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_DASHBOARDS_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} + + + {{- if .Values.security.enabled }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-password }' + + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-dashboards-password }' + + - name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.logstash-password }' + {{- end }} + + + {{- if (include "opensearch.createTlsSecret" .) }} + {{ if not (include "opensearch.coordinating.enabled" .) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_MASTER_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.master.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} + {{- $serviceName := include "opensearch.data.servicename" . }} + - name: OPENSEARCH_DATA_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_DATA_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.data.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_COORDINATING_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.coordinating.fullname" .) 
}} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} + {{- $serviceName := include "opensearch.ingest.servicename" . }} + - name: OPENSEARCH_INGEST_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_INGEST_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.ingest.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + {{- end }} diff --git a/addons/opensearch/3.0/plans/standard-4c8g128/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-4c8g128/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c8g128/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-4c8g128/meta.yaml b/addons/opensearch/3.0/plans/standard-4c8g128/meta.yaml new file mode 100644 index 00000000..a672e5bf --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c8g128/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g128" +id: 2367a9ec-1ad6-40a9-ba35-a0721f0e57de +description: "Opensearch standard-4c8g128 plan which limit resources 4 cores 8Gi memory and persistence size 128Gi." 
+displayName: "standard-4c8g128" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-4c8g128/values.yaml b/addons/opensearch/3.0/plans/standard-4c8g128/values.yaml new file mode 100644 index 00000000..21e4c16c --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-4c8g128/values.yaml @@ -0,0 +1,157 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-opensearch-standard-4c8g128" +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 3 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resources.limits The resources limits for opensearch containers + ## @param master.resources.requests The requested resources for opensearch containers + ## + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 2 + memory: 4Gi + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. 
+ ## Example: + ## heapSize: 128m + ## + heapSize: 4096m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 128Gi +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section OpenSearch Dashborads Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## @param dashboards.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dashboards.resources.limits The resources limits for the data containers + ## @param dashboards.resources.requests [object] The 
requested resources for the data containers + ## + resources: + limits: + memory: 2048Mi + cpu: 1000m + requests: + memory: 1024Mi + cpu: 500m diff --git a/addons/opensearch/3.0/plans/standard-8c32g1024/bind.yaml b/addons/opensearch/3.0/plans/standard-8c32g1024/bind.yaml new file mode 100644 index 00000000..8d61f02b --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g1024/bind.yaml @@ -0,0 +1,123 @@ +credential: +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DOMAIN + value: {{ include "opensearch.service.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_TCP_REST_API_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rest-api")].port }' + + - name: OPENSEARCH_TCP_TRANSPORT_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-transport")].port }' + + {{- if .Values.dashboards.enabled }} + {{ if (eq .Values.dashboards.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_DASHBOARDS_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} + + + {{- if .Values.security.enabled }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-password }' + + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-dashboards-password }' + + - name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.logstash-password }' + {{- end }} + + + {{- if (include "opensearch.createTlsSecret" .) }} + {{ if not (include "opensearch.coordinating.enabled" .) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_MASTER_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.master.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} + {{- $serviceName := include "opensearch.data.servicename" . }} + - name: OPENSEARCH_DATA_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_DATA_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.data.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_COORDINATING_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.coordinating.fullname" .) 
}} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} + {{- $serviceName := include "opensearch.ingest.servicename" . }} + - name: OPENSEARCH_INGEST_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_INGEST_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.ingest.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + {{- end }} diff --git a/addons/opensearch/3.0/plans/standard-8c32g1024/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-8c32g1024/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g1024/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-8c32g1024/meta.yaml b/addons/opensearch/3.0/plans/standard-8c32g1024/meta.yaml new file mode 100644 index 00000000..65d945c9 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g1024/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g1024" +id: ff980185-f548-46d7-9739-ebe7b3fb8757 +description: "Opensearch standard-8c32g1024 plan which limit resources 8 cores 32Gi memory and persistence size 1Ti." 
+displayName: "standard-8c32g1024" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml b/addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml new file mode 100644 index 00000000..5829f62a --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml @@ -0,0 +1,157 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-opensearch-standard-8c32g1024" +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 3 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resources.limits The resources limits for opensearch containers + ## @param master.resources.requests The requested resources for opensearch containers + ## + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 4 + memory: 16Gi + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. 
+ ## Example: + ## heapSize: 128m + ## + heapSize: 16384m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 1024Gi +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section OpenSearch Dashboards Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## @param dashboards.replicaCount Number of OpenSearch Dashboards replicas to deploy + ## + replicaCount: 1 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dashboards.resources.limits The resources limits for the OpenSearch Dashboards containers + ## @param dashboards.resources.requests [object] The 
requested resources for the OpenSearch Dashboards containers + ## + resources: + limits: + memory: 4096Mi + cpu: 2000m + requests: + memory: 2048Mi + cpu: 1000m diff --git a/addons/opensearch/3.0/plans/standard-8c32g2048/bind.yaml b/addons/opensearch/3.0/plans/standard-8c32g2048/bind.yaml new file mode 100644 index 00000000..8d61f02b --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g2048/bind.yaml @@ -0,0 +1,123 @@ +credential: +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DOMAIN + value: {{ include "opensearch.service.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_TCP_REST_API_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rest-api")].port }' + + - name: OPENSEARCH_TCP_TRANSPORT_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-transport")].port }' + + {{- if .Values.dashboards.enabled }} + {{ if (eq .Values.dashboards.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_DASHBOARDS_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} + + + {{- if .Values.security.enabled }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-password }' + + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-dashboards-password }' + + - name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.logstash-password }' + {{- end }} + + + {{- if (include "opensearch.createTlsSecret" .) }} + {{ if not (include "opensearch.coordinating.enabled" .) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_MASTER_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.master.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} + {{- $serviceName := include "opensearch.data.servicename" . }} + - name: OPENSEARCH_DATA_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_DATA_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.data.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_COORDINATING_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.coordinating.fullname" .) 
}} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} + {{- $serviceName := include "opensearch.ingest.servicename" . }} + - name: OPENSEARCH_INGEST_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_INGEST_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.ingest.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + {{- end }} diff --git a/addons/opensearch/3.0/plans/standard-8c32g2048/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-8c32g2048/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g2048/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-8c32g2048/meta.yaml b/addons/opensearch/3.0/plans/standard-8c32g2048/meta.yaml new file mode 100644 index 00000000..1a9587fa --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g2048/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g2048" +id: 7e164160-e379-4975-8b00-f7f1dabc11ca +description: "Opensearch standard-8c32g2048 plan which limit resources 8 cores 32Gi memory and persistence size 2Ti." 
+displayName: "standard-8c32g2048" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml b/addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml new file mode 100644 index 00000000..50a21715 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml @@ -0,0 +1,157 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-opensearch-standard-8c32g2048" +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 3 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resources.limits The resources limits for opensearch containers + ## @param master.resources.requests The requested resources for opensearch containers + ## + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 4 + memory: 16Gi + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. 
+ ## Example: + ## heapSize: 128m + ## + heapSize: 16384m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 2048Gi +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section OpenSearch Dashborads Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## @param dashboards.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dashboards.resources.limits The resources limits for the data containers + ## @param dashboards.resources.requests [object] The 
requested resources for the data containers + ## + resources: + limits: + memory: 4096Mi + cpu: 2000m + requests: + memory: 2058Mi + cpu: 1000m diff --git a/addons/opensearch/3.0/plans/standard-8c32g768/bind.yaml b/addons/opensearch/3.0/plans/standard-8c32g768/bind.yaml new file mode 100644 index 00000000..8d61f02b --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g768/bind.yaml @@ -0,0 +1,123 @@ +credential: +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DOMAIN + value: {{ include "opensearch.service.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: OPENSEARCH_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_TCP_REST_API_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rest-api")].port }' + + - name: OPENSEARCH_TCP_TRANSPORT_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.service.name" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-transport")].port }' + + {{- if .Values.dashboards.enabled }} + {{ if (eq .Values.dashboards.service.type "LoadBalancer") }} + - name: EXTERNAL_OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: OPENSEARCH_DASHBOARDS_HOST + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: OPENSEARCH_DASHBOARDS_PORT + valueFrom: + serviceRef: + name: {{ include "opensearch.dashboards.servicename" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + {{- end }} + + + {{- if .Values.security.enabled }} + - name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-password }' + + - name: OPENSEARCH_DASHBOARDS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.opensearch-dashboards-password }' + + - name: LOGSTASH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.logstash-password }' + {{- end }} + + + {{- if (include "opensearch.createTlsSecret" .) }} + {{ if not (include "opensearch.coordinating.enabled" .) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_MASTER_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.master.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} + {{- $serviceName := include "opensearch.data.servicename" . }} + - name: OPENSEARCH_DATA_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_DATA_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.data.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} + - name: OPENSEARCH_DOMAIN + value: {{ (printf "%s.%s.svc.%s" (include "opensearch.service.name" .) $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_COORDINATING_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.coordinating.fullname" .) 
}} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + + {{- if and (include "opensearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} + {{- $serviceName := include "opensearch.ingest.servicename" . }} + - name: OPENSEARCH_INGEST_DOMAIN + value: {{ (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) }} + + - name: OPESEARCH_INGEST_CA_CRT + valueFrom: + secretKeyRef: + name: {{ printf "%s-crt" (include "opensearch.ingest.fullname" .) }} + jsonpath: '{ .data.ca\.crt }' + {{- end }} + {{- end }} diff --git a/addons/opensearch/3.0/plans/standard-8c32g768/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-8c32g768/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g768/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/opensearch/3.0/plans/standard-8c32g768/meta.yaml b/addons/opensearch/3.0/plans/standard-8c32g768/meta.yaml new file mode 100644 index 00000000..d15c1158 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g768/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g768" +id: 0628150c-5d6b-4c11-bb2f-2b28847ea630 +description: "Opensearch standard-8c32g768 plan which limit resources 8 cores 32Gi memory and persistence size 768Gi." 
+displayName: "standard-8c32g768" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-8c32g768/values.yaml b/addons/opensearch/3.0/plans/standard-8c32g768/values.yaml new file mode 100644 index 00000000..01d271dc --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-8c32g768/values.yaml @@ -0,0 +1,157 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-opensearch-standard-8c32g768" +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the OpenSearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: false + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 3 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param master.resources.limits The resources limits for opensearch containers + ## @param master.resources.requests The requested resources for opensearch containers + ## + resources: + limits: + cpu: 8 + memory: 32Gi + requests: + cpu: 4 + memory: 16Gi + ## @param master.heapSize OpenSearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. 
+ ## Example: + ## heapSize: 128m + ## + heapSize: 16384m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 768Gi +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: false + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 0 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + # resources: + # limits: + # memory: 512Mi + # cpu: 250m + # requests: + # memory: 256Mi + # cpu: 125m +## @section OpenSearch Dashborads Parameters + +dashboards: + ## @param dashboards.enabled Enables OpenSearch Dashboards deployment + ## + enabled: true + ## @param dashboards.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## OpenSearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dashboards.resources.limits The resources limits for the data containers + ## @param dashboards.resources.requests [object] The 
requested resources for the data containers + ## + resources: + limits: + memory: 3072Mi + cpu: 1500m + requests: + memory: 1024Mi + cpu: 750m From 0ffde83ba100e526f7fe11b95dac380d9d05bfdc Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 12 Jun 2025 11:50:07 +0800 Subject: [PATCH 59/93] chore(grafana): store db in postgres (#108) --- .../chart/grafana/templates/configmap-ini.yaml | 18 ++++++++++++++++++ .../10/chart/grafana/templates/deployment.yaml | 2 +- addons/grafana/10/chart/grafana/values.yaml | 7 ++++--- addons/grafana/10/meta.yaml | 3 +++ 4 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 addons/grafana/10/chart/grafana/templates/configmap-ini.yaml diff --git a/addons/grafana/10/chart/grafana/templates/configmap-ini.yaml b/addons/grafana/10/chart/grafana/templates/configmap-ini.yaml new file mode 100644 index 00000000..980d0878 --- /dev/null +++ b/addons/grafana/10/chart/grafana/templates/configmap-ini.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-ini + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: grafana + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + grafana.ini: | + [database] + type = postgres + url = {{ .Values.pg_url | quote }} diff --git a/addons/grafana/10/chart/grafana/templates/deployment.yaml b/addons/grafana/10/chart/grafana/templates/deployment.yaml index 38426a73..cf1594a9 100644 --- a/addons/grafana/10/chart/grafana/templates/deployment.yaml +++ b/addons/grafana/10/chart/grafana/templates/deployment.yaml @@ -321,7 +321,7 @@ spec: - name: grafana-ini {{- if .Values.config.grafanaIniConfigMap }} configMap: - name: {{ .Values.config.grafanaIniConfigMap }} + name: {{ include "common.names.fullname" . }}-ini {{- else if .Values.config.grafanaIniSecret }} secret: secretName: {{ .Values.config.grafanaIniSecret }} diff --git a/addons/grafana/10/chart/grafana/values.yaml b/addons/grafana/10/chart/grafana/values.yaml index 753732fa..9bec5e4d 100644 --- a/addons/grafana/10/chart/grafana/values.yaml +++ b/addons/grafana/10/chart/grafana/values.yaml @@ -209,14 +209,15 @@ ldap: ## @param config.grafanaIniSecret Name of the Secret containing the `grafana.ini` file ## config: - useGrafanaIniFile: false - grafanaIniConfigMap: "" + useGrafanaIniFile: true + grafanaIniConfigMap: "grafana-ini" grafanaIniSecret: "" ## Create dasboard provider to load dashboards, a default one is created to load dashboards ## from "/opt/drycc/grafana/dashboards" ## @param dashboardsProvider.enabled Enable the use of a Grafana dashboard provider ## @param dashboardsProvider.configMapName Name of a ConfigMap containing a custom dashboard provider ## +pg_url: "postgres://user:secret@host:port/database" dashboardsProvider: enabled: false ## Important to set 
the Path to "/opt/drycc/grafana/dashboards" @@ -542,7 +543,7 @@ grafana: ## @param persistence.size Size for the PV ## persistence: - enabled: true + enabled: false ## If defined, storageClassName: ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is diff --git a/addons/grafana/10/meta.yaml b/addons/grafana/10/meta.yaml index 8e4a9eac..848290eb 100644 --- a/addons/grafana/10/meta.yaml +++ b/addons/grafana/10/meta.yaml @@ -23,4 +23,7 @@ allow_parameters: description: "service type config for values.yaml" - name: "grafana.nodeSelector" required: false +- name: "pg_url" + required: true + description: "Postgres URL for Grafana to connect to" archive: false From 9ff2715ce168b3e3d2e87e897163af0f4b0828d0 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 12 Jun 2025 16:49:54 +0800 Subject: [PATCH 60/93] chore(grafana): Adjusting the number of replicas to support high availability (#109) --- addons/grafana/10/chart/grafana/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/grafana/10/chart/grafana/values.yaml b/addons/grafana/10/chart/grafana/values.yaml index 9bec5e4d..f76a5b7a 100644 --- a/addons/grafana/10/chart/grafana/values.yaml +++ b/addons/grafana/10/chart/grafana/values.yaml @@ -269,7 +269,7 @@ notifiers: grafana: ## @param grafana.replicaCount Number of Grafana nodes ## - replicaCount: 1 + replicaCount: 2 ## @param grafana.updateStrategy.type Set up update strategy for Grafana installation. ## Set to Recreate if you use persistent volume that cannot be mounted by more than one pods to make sure the pods is destroyed first. 
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy From f395bf38f70c64d86d2e4f953e4bdf0e4982aa7d Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 17 Jun 2025 14:48:32 +0800 Subject: [PATCH 61/93] chore(grafana): support ha (#110) --- .../10/chart/grafana/templates/configmap-ini.yaml | 7 +++++++ .../grafana/10/chart/grafana/templates/deployment.yaml | 10 ++++++++++ addons/grafana/10/chart/grafana/templates/service.yaml | 8 ++++++++ addons/grafana/10/chart/grafana/values.yaml | 6 +++++- addons/grafana/10/meta.yaml | 2 +- 5 files changed, 31 insertions(+), 2 deletions(-) diff --git a/addons/grafana/10/chart/grafana/templates/configmap-ini.yaml b/addons/grafana/10/chart/grafana/templates/configmap-ini.yaml index 980d0878..edc57af1 100644 --- a/addons/grafana/10/chart/grafana/templates/configmap-ini.yaml +++ b/addons/grafana/10/chart/grafana/templates/configmap-ini.yaml @@ -13,6 +13,13 @@ metadata: {{- end }} data: grafana.ini: | + [unified_alerting] + enabled = true + ha_listen_address = "${POD_IP}:9094" + ha_peers = "{{ include "common.names.fullname" . 
}}:9094" + ha_advertise_address = "${POD_IP}:9094" + ha_peer_timeout = 15s + ha_reconnect_timeout = 2m [database] type = postgres url = {{ .Values.pg_url | quote }} diff --git a/addons/grafana/10/chart/grafana/templates/deployment.yaml b/addons/grafana/10/chart/grafana/templates/deployment.yaml index cf1594a9..b354727b 100644 --- a/addons/grafana/10/chart/grafana/templates/deployment.yaml +++ b/addons/grafana/10/chart/grafana/templates/deployment.yaml @@ -128,6 +128,10 @@ spec: name: {{ include "common.tplvalues.render" (dict "value" .Values.grafana.extraEnvVarsSecret "context" $) }} {{- end }} env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: GF_SECURITY_ADMIN_PASSWORD valueFrom: secretKeyRef: @@ -217,6 +221,12 @@ spec: - name: dashboard containerPort: {{ .Values.grafana.containerPorts.grafana }} protocol: TCP + - name: gossip-tcp + containerPort: {{ .Values.grafana.containerPorts.gossipTCP }} + protocol: TCP + - name: gossip-udp + containerPort: {{ .Values.grafana.containerPorts.gossipUDP }} + protocol: UDP {{- if .Values.grafana.customLivenessProbe }} livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.grafana.customLivenessProbe "context" $) | nindent 12 }} {{- else if .Values.grafana.livenessProbe.enabled }} diff --git a/addons/grafana/10/chart/grafana/templates/service.yaml b/addons/grafana/10/chart/grafana/templates/service.yaml index 8eb7182d..5a7c9dc8 100644 --- a/addons/grafana/10/chart/grafana/templates/service.yaml +++ b/addons/grafana/10/chart/grafana/templates/service.yaml @@ -45,6 +45,14 @@ spec: targetPort: dashboard protocol: TCP name: http + - port: {{ .Values.service.ports.gossipTCP }} + targetPort: gossip-tcp + protocol: TCP + name: gossip-tcp + - port: {{ .Values.service.ports.gossipUDP }} + targetPort: gossip-udp + protocol: UDP + name: gossip-udp {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.grafana)) }} 
nodePort: {{ .Values.service.nodePorts.grafana }} {{- else if eq .Values.service.type "ClusterIP" }} diff --git a/addons/grafana/10/chart/grafana/values.yaml b/addons/grafana/10/chart/grafana/values.yaml index f76a5b7a..4f2c4be7 100644 --- a/addons/grafana/10/chart/grafana/values.yaml +++ b/addons/grafana/10/chart/grafana/values.yaml @@ -318,6 +318,8 @@ grafana: ## containerPorts: grafana: 3000 + gossipTCP: 9094 + gossipUDP: 9094 ## Node affinity preset ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity ## @param grafana.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` @@ -586,6 +588,8 @@ service: ## ports: grafana: 3000 + gossipTCP: 9094 + gossipUDP: 9094 ## @param service.nodePorts.grafana Specify the nodePort value for the LoadBalancer and NodePort service types ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport ## @@ -733,7 +737,7 @@ ingress: metrics: ## @param metrics.enabled Enable the export of Prometheus metrics ## - enabled: false + enabled: true ## Prometheus Operator ServiceMonitor configuration ## @param metrics.service.annotations [object] Annotations for Prometheus metrics service ## diff --git a/addons/grafana/10/meta.yaml b/addons/grafana/10/meta.yaml index 848290eb..888100cd 100644 --- a/addons/grafana/10/meta.yaml +++ b/addons/grafana/10/meta.yaml @@ -13,7 +13,7 @@ tags: grafana bindable: true instances_retrievable: true bindings_retrievable: true -plan_updateable: false +plan_updateable: true allow_parameters: - name: "networkPolicy.allowNamespaces" required: false From be4a55050fb85b3d6c938599e2c5e92dbdd78852 Mon Sep 17 00:00:00 2001 From: Eamon Date: Wed, 25 Jun 2025 11:50:43 +0800 Subject: [PATCH 62/93] chore(addons): add victoriametrics (#112) --- addons/index.yaml | 5 +- .../1/chart/victoriametrics/.helmignore | 25 + .../1/chart/victoriametrics/Chart.yaml | 28 + .../1/chart/victoriametrics/README.md 
| 1159 ++++++ .../chart/victoriametrics/templates/NOTES.txt | 168 + .../victoriametrics/templates/_helpers.tpl | 283 ++ .../chart/victoriametrics/templates/bind.yaml | 24 + .../victoriametrics/templates/extra-list.yaml | 9 + .../templates/vmagent/configmap.yaml | 215 + .../templates/vmagent/dep-ds.yaml | 215 + .../templates/vmagent/hpa.yaml | 42 + .../templates/vmagent/networkpolicy.yaml | 99 + .../templates/vmagent/pdb.yaml | 29 + .../templates/vmagent/rbac.yaml | 72 + .../templates/vmagent/service-account.yaml | 20 + .../templates/vmagent/service.yaml | 92 + .../templates/vmagent/servicemonitor.yaml | 48 + .../templates/vmagent/vpa.yaml | 50 + .../templates/vmalert/configmap.yaml | 31 + .../templates/vmalert/deployment.yaml | 185 + .../templates/vmalert/hpa.yaml | 42 + .../templates/vmalert/networkpolicy.yaml | 76 + .../templates/vmalert/pdb.yaml | 29 + .../templates/vmalert/service-account.yaml | 20 + .../templates/vmalert/service.yaml | 59 + .../templates/vmalert/servicemonitor.yaml | 48 + .../templates/vmalert/vpa.yaml | 45 + .../templates/vmauth/dep-ds.yaml | 178 + .../victoriametrics/templates/vmauth/hpa.yaml | 42 + .../templates/vmauth/ingress-tls-secret.yaml | 48 + .../templates/vmauth/ingress.yaml | 57 + .../templates/vmauth/networkpolicy.yaml | 100 + .../victoriametrics/templates/vmauth/pdb.yaml | 29 + .../templates/vmauth/secret.yaml | 42 + .../templates/vmauth/service-account.yaml | 20 + .../templates/vmauth/service.yaml | 59 + .../templates/vmauth/servicemonitor.yaml | 48 + .../victoriametrics/templates/vmauth/vpa.yaml | 50 + .../templates/vminsert/deployment.yaml | 160 + .../templates/vminsert/hpa.yaml | 42 + .../vminsert/ingress-tls-secret.yaml | 48 + .../templates/vminsert/ingress.yaml | 57 + .../templates/vminsert/networkpolicy.yaml | 79 + .../templates/vminsert/pdb.yaml | 29 + .../templates/vminsert/service-account.yaml | 20 + .../templates/vminsert/service.yaml | 57 + .../templates/vminsert/servicemonitor.yaml | 48 + 
.../templates/vminsert/vpa.yaml | 45 + .../templates/vmselect/dep-sts.yaml | 178 + .../templates/vmselect/headless-service.yaml | 30 + .../templates/vmselect/hpa.yaml | 47 + .../vmselect/ingress-tls-secret.yaml | 48 + .../templates/vmselect/ingress.yaml | 57 + .../templates/vmselect/networkpolicy.yaml | 79 + .../templates/vmselect/pdb.yaml | 29 + .../templates/vmselect/service-account.yaml | 20 + .../templates/vmselect/service.yaml | 57 + .../templates/vmselect/servicemonitor.yaml | 48 + .../templates/vmselect/vpa.yaml | 50 + .../templates/vmstorage/headless-service.yaml | 36 + .../templates/vmstorage/hpa.yaml | 42 + .../templates/vmstorage/networkpolicy.yaml | 73 + .../templates/vmstorage/pdb.yaml | 29 + .../templates/vmstorage/service-account.yaml | 20 + .../templates/vmstorage/service.yaml | 57 + .../templates/vmstorage/servicemonitor.yaml | 48 + .../templates/vmstorage/statefulset.yaml | 225 + .../templates/vmstorage/vpa.yaml | 45 + .../1/chart/victoriametrics/values.yaml | 3670 +++++++++++++++++ addons/victoriametrics/1/demo.yaml | 17 + addons/victoriametrics/1/meta.yaml | 45 + .../1/plans/standard-16c32g500/bind.yaml | 24 + .../create-instance-schema.json | 12 + .../1/plans/standard-16c32g500/meta.yaml | 6 + .../1/plans/standard-16c32g500/values.yaml | 44 + .../1/plans/standard-1c1g10/bind.yaml | 24 + .../create-instance-schema.json | 12 + .../1/plans/standard-1c1g10/meta.yaml | 6 + .../1/plans/standard-1c1g10/values.yaml | 44 + .../1/plans/standard-2c4g50/bind.yaml | 24 + .../create-instance-schema.json | 12 + .../1/plans/standard-2c4g50/meta.yaml | 6 + .../1/plans/standard-2c4g50/values.yaml | 44 + .../1/plans/standard-4c8g100/bind.yaml | 24 + .../create-instance-schema.json | 12 + .../1/plans/standard-4c8g100/meta.yaml | 6 + .../1/plans/standard-4c8g100/values.yaml | 44 + .../1/plans/standard-8c16g200/bind.yaml | 24 + .../create-instance-schema.json | 12 + .../1/plans/standard-8c16g200/meta.yaml | 6 + .../1/plans/standard-8c16g200/values.yaml | 44 + 91 
files changed, 9755 insertions(+), 1 deletion(-) create mode 100644 addons/victoriametrics/1/chart/victoriametrics/.helmignore create mode 100644 addons/victoriametrics/1/chart/victoriametrics/Chart.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/README.md create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/NOTES.txt create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/_helpers.tpl create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/bind.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/extra-list.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/configmap.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/dep-ds.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/hpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/networkpolicy.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/pdb.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/rbac.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service-account.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/servicemonitor.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/vpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/configmap.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/deployment.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/hpa.yaml create mode 100644 
addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/networkpolicy.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/pdb.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service-account.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/servicemonitor.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/vpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/dep-ds.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/hpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress-tls-secret.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/networkpolicy.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/pdb.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/secret.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service-account.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/servicemonitor.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/vpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/deployment.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/hpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress-tls-secret.yaml create mode 100644 
addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/networkpolicy.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/pdb.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service-account.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/servicemonitor.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/vpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/dep-sts.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/headless-service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/hpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress-tls-secret.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/networkpolicy.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/pdb.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service-account.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/servicemonitor.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/vpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/headless-service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/hpa.yaml create 
mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/networkpolicy.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/pdb.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service-account.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/servicemonitor.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/statefulset.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/vpa.yaml create mode 100644 addons/victoriametrics/1/chart/victoriametrics/values.yaml create mode 100644 addons/victoriametrics/1/demo.yaml create mode 100644 addons/victoriametrics/1/meta.yaml create mode 100644 addons/victoriametrics/1/plans/standard-16c32g500/bind.yaml create mode 100644 addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json create mode 100644 addons/victoriametrics/1/plans/standard-16c32g500/meta.yaml create mode 100644 addons/victoriametrics/1/plans/standard-16c32g500/values.yaml create mode 100644 addons/victoriametrics/1/plans/standard-1c1g10/bind.yaml create mode 100644 addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json create mode 100644 addons/victoriametrics/1/plans/standard-1c1g10/meta.yaml create mode 100644 addons/victoriametrics/1/plans/standard-1c1g10/values.yaml create mode 100644 addons/victoriametrics/1/plans/standard-2c4g50/bind.yaml create mode 100644 addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json create mode 100644 addons/victoriametrics/1/plans/standard-2c4g50/meta.yaml create mode 100644 addons/victoriametrics/1/plans/standard-2c4g50/values.yaml create mode 100644 addons/victoriametrics/1/plans/standard-4c8g100/bind.yaml create mode 100644 
addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json create mode 100644 addons/victoriametrics/1/plans/standard-4c8g100/meta.yaml create mode 100644 addons/victoriametrics/1/plans/standard-4c8g100/values.yaml create mode 100644 addons/victoriametrics/1/plans/standard-8c16g200/bind.yaml create mode 100644 addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json create mode 100644 addons/victoriametrics/1/plans/standard-8c16g200/meta.yaml create mode 100644 addons/victoriametrics/1/plans/standard-8c16g200/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index 3c5fee53..ef1f050f 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -81,4 +81,7 @@ entries: description: "Transactional Catalog for Data Lakes with Git-like semantics . " lakefs: - version: "1.52" - description: "LakeFS provides version control over the data lake, and uses Git-like semantics to create and access those versions. If you know git, you’ll be right at home with lakeFS. " \ No newline at end of file + description: "LakeFS provides version control over the data lake, and uses Git-like semantics to create and access those versions. If you know git, you’ll be right at home with lakeFS. " + victoriametrics: + - version: "1" + description: "VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It is designed to handle large amounts of data with high performance and low resource usage." diff --git a/addons/victoriametrics/1/chart/victoriametrics/.helmignore b/addons/victoriametrics/1/chart/victoriametrics/.helmignore new file mode 100644 index 00000000..207983f3 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# img folder +img/ +# Changelog +CHANGELOG.md diff --git a/addons/victoriametrics/1/chart/victoriametrics/Chart.yaml b/addons/victoriametrics/1/chart/victoriametrics/Chart.yaml new file mode 100644 index 00000000..9ef8fa6b --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/Chart.yaml @@ -0,0 +1,28 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 1.118.0 +dependencies: +- name: common + repository: oci://registry.drycc.cc/charts + tags: + - drycc-common + version: ~1.1.3 +description: VictoriaMetrics is a fast, cost-effective, and scalable monitoring solution + and time series database, compatible with Prometheus and Graphite +home: https://drycc.com +icon: https://dyltqmyl993wv.cloudfront.net/assets/stacks/victoriametrics/img/victoriametrics-stack-220x234.png +keywords: +- monitoring +- metrics +maintainers: +- name: Broadcom, Inc. All Rights Reserved. + url: https://github.com/drycc/charts +name: victoriametrics +sources: +- https://github.com/drycc/charts/tree/main/drycc/victoriametrics +version: 0.1.13 diff --git a/addons/victoriametrics/1/chart/victoriametrics/README.md b/addons/victoriametrics/1/chart/victoriametrics/README.md new file mode 100644 index 00000000..3d92c67c --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/README.md @@ -0,0 +1,1159 @@ + + +# Drycc package for VictoriaMetrics + +VictoriaMetrics is a fast, cost-effective, and scalable monitoring solution and time series database, compatible with Prometheus and Graphite + +[Overview of VictoriaMetrics](https://victoriametrics.com/) + +Trademarks: This software listing is packaged by Drycc. 
The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/Drycccharts/victoriametrics +``` + +Looking to use VictoriaMetrics in production? Try [VMware Tanzu Application Catalog](https://Drycc.com/enterprise), the commercial edition of the Drycc catalog. + +## Introduction + +This chart bootstraps a [VictoriaMetrics](https://github.com/Drycc/containers/tree/main/Drycc/victoriametrics-vmselect) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/victoriametrics +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Drycc, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=Drycccharts`. + +The command deploys VictoriaMetrics on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Configuration and installation details + +### Available VictoriaMetrics components + +The VictoriaMetrics chart always deploys the `vminsert`, `vmstorage` and `vmselect` components, as part of the basic [open-source VictoriaMetrics cluster installation](https://docs.victoriametrics.com/cluster-victoriametrics/). 
Additionally, it is possible to deploy the [`vmauth`](https://docs.victoriametrics.com/vmauth/), [`vmalert`](https://docs.victoriametrics.com/vmalert/) or [`vmagent`](https://docs.victoriametrics.com/vmagent/) components by setting `vmauth.enabled=true`, `vmagent.enabled=true` or `vmalert.enabled=true`. + +### Resource requests and limits + +Drycc charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the Drycc/common chart](https://github.com/Drycc/charts/blob/main/Drycc/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Drycc will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. 
+ +### Backup and restore + +To back up and restore Helm chart deployments on Kubernetes, you need to back up the persistent volumes from the source deployment and attach them to a new deployment using [Velero](https://velero.io/), a Kubernetes backup/restore tool. Find the instructions for using Velero in [this guide](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-backup-restore-deployments-velero-index.html). + +### Prometheus metrics + +This chart can be integrated with Prometheus by setting `*.metrics.enabled` (under the `vminsert`, `vmselect`, `vmstorage`, `vmagent`, `vmalert` and `vmauth` sections) to `true`. This will expose VictoriaMetrics native Prometheus ports in the containers. Additionally, it will deploy several `metrics` services, which can be configured under the `*.metrics.service` section (under the `vminsert`, `vmselect`, `vmstorage`, `vmagent`, `vmalert` and `vmauth` sections). These `metrics` services will have the necessary annotations to be automatically scraped by Prometheus. + +#### Prometheus requirements + +It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Drycc Prometheus helm chart](https://github.com/Drycc/charts/tree/main/Drycc/prometheus) or the [Drycc Kube Prometheus helm chart](https://github.com/Drycc/charts/tree/main/Drycc/kube-prometheus) to easily have a working Prometheus in your cluster. + +#### Integration with Prometheus Operator + +The chart can deploy `ServiceMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `*.metrics.serviceMonitor.enabled=true` (`vminsert`, `vmselect`, `vmstorage`, `vmagent`, `vmalert` and `vmauth`). 
Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error: + +```text +no matches for kind "ServiceMonitor" in version "monitoring.coreos.com/v1" +``` + +Install the [Drycc Kube Prometheus helm chart](https://github.com/Drycc/charts/tree/main/Drycc/kube-prometheus) for having the necessary CRDs and the Prometheus Operator. + +### Ingress + +This chart provides support for Ingress resources. If you have an ingress controller installed on your cluster, such as [nginx-ingress-controller](https://github.com/Drycc/charts/tree/main/Drycc/nginx-ingress-controller) or [contour](https://github.com/Drycc/charts/tree/main/Drycc/contour) you can utilize the ingress controller to serve your application. To enable Ingress integration, set `*.ingress.enabled` (under the `vminsert`, `vmauth` and `vmselect` sections) to `true`. + +The most common scenario is to have one host name mapped to the deployment. In this case, the `*.ingress.hostname` (under the `vminsert`, `vmauth` and `vmselect` sections) property can be used to set the host name. The `*.ingress.tls` parameter can be used to add the TLS configuration for this host. + +However, it is also possible to have more than one host. To facilitate this, the `*.ingress.extraHosts` parameter (if available) can be set with the host names specified as an array. The `*.ingress.extraTLS` (under the `vminsert`, `vmauth` and `vmselect` sections) parameter (if available) can also be used to add the TLS configuration for extra hosts. + +> NOTE: For each host specified in the `*.ingress.extraHosts` (under the `vminsert`, `vmauth` and `vmselect` sections) parameter, it is necessary to set a name, path, and any annotations that the Ingress controller should know about.
Not all annotations are supported by all Ingress controllers, but [this annotation reference document](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md) lists the annotations supported by many popular Ingress controllers. + +Adding the TLS parameter (where available) will cause the chart to generate HTTPS URLs, and the application will be available on port 443. The actual TLS secrets do not have to be generated by this chart. However, if TLS is enabled, the Ingress record will not work until the TLS secret exists. + +[Learn more about Ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/). + +### Securing traffic using TLS + +This chart facilitates the creation of TLS secrets for use with the Ingress controller (although this is not mandatory). There are several common use cases: + +- Generate certificate secrets based on chart parameters. +- Enable externally generated certificates. +- Manage application certificates via an external service (like [cert-manager](https://github.com/jetstack/cert-manager/)). +- Create self-signed certificates within the chart (if supported). + +In the first two cases, a certificate and a key are needed. Files are expected in `.pem` format. + +Here is an example of a certificate file: + +> NOTE: There may be more than one certificate if there is a certificate chain. + +```text +-----BEGIN CERTIFICATE----- +MIID6TCCAtGgAwIBAgIJAIaCwivkeB5EMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +... +jScrvkiBO65F46KioCL9h5tDvomdU1aqpI/CBzhvZn1c0ZTf87tGQR8NK7v7 +-----END CERTIFICATE----- +``` + +Here is an example of a certificate key: + +```text +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAvLYcyu8f3skuRyUgeeNpeDvYBCDcgq+LsWap6zbX5f8oLqp4 +... 
+wrj2wDbCDCFmfqnSJ+dKI3vFLlEz44sAV8jX/kd4Y6ZTQhlLbYc= +-----END RSA PRIVATE KEY----- +``` + +- If using Helm to manage the certificates based on the parameters, copy these values into the `certificate` and `key` values for a given `*.ingress.secrets` (under the `vminsert`, `vmauth` and `vmselect` sections) entry. +- If managing TLS secrets separately, it is necessary to create a TLS secret with name `INGRESS_HOSTNAME-tls` (where INGRESS_HOSTNAME is a placeholder to be replaced with the hostname you set using the `*.ingress.hostname` (under the `vminsert`, `vmauth` and `vmselect` sections) parameter). +- If your cluster has a [cert-manager](https://github.com/jetstack/cert-manager) add-on to automate the management and issuance of TLS certificates, add to `*.ingress.annotations` (under the `vminsert`, `vmauth` and `vmselect` sections) the [corresponding ones](https://cert-manager.io/docs/usage/ingress/#supported-annotations) for cert-manager. +- If using self-signed certificates created by Helm, set both `*.ingress.tls` and `*.ingress.selfSigned` (under the `vminsert`, `vmauth` and `vmselect` sections) to `true`.
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.security.allowInsecureImages` | Allows skipping image verification | `false` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | + +### Common parameters + +| Name | Description | Value | +| ------------------- | ---------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `apiVersions` | Override Kubernetes API versions reported by .Capabilities | `[]` | +| `nameOverride` | String to partially override common.names.name | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | + +### VictoriaMetrics common parameters + +| Name | Description | Value | +| --------------- | ----------------------------------- | ------ | +| `envflagEnable` | Enable envflag | `true` | +| `envflagPrefix` | Prefix used for the envflag entries | `VM_` | +| `loggerFormat` | Set format of the logs | `json` | + +### VictoriaMetrics Select Parameters + +| Name | Description | Value | +| ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| `vmselect.image.registry` | VictoriaMetrics Select image registry | `REGISTRY_NAME` | +| `vmselect.image.repository` | VictoriaMetrics Select image repository | `REPOSITORY_NAME/victoriametrics-vmselect` | +| `vmselect.image.digest` | VictoriaMetrics Select image 
digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `vmselect.image.pullPolicy` | VictoriaMetrics Select image pull policy | `IfNotPresent` | +| `vmselect.image.pullSecrets` | VictoriaMetrics Select image pull secrets | `[]` | +| `vmselect.replicaCount` | Number of VictoriaMetrics Select replicas to deploy | `1` | +| `vmselect.containerPorts.http` | VictoriaMetrics Select http container port | `8481` | +| `vmselect.livenessProbe.enabled` | Enable livenessProbe on VictoriaMetrics Select containers | `true` | +| `vmselect.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `vmselect.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `vmselect.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `vmselect.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `vmselect.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `vmselect.readinessProbe.enabled` | Enable readinessProbe on VictoriaMetrics Select containers | `true` | +| `vmselect.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `vmselect.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `vmselect.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `vmselect.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `vmselect.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `vmselect.startupProbe.enabled` | Enable startupProbe on VictoriaMetrics Select containers | `false` | +| `vmselect.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `vmselect.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `vmselect.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` 
| +| `vmselect.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `vmselect.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `vmselect.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `vmselect.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `vmselect.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `vmselect.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmselect.resources is set (vmselect.resources is recommended for production). | `nano` | +| `vmselect.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `vmselect.kind` | Define how to deploy VictoriaMetrics Select (allowed values: deployment or statefulset) | `deployment` | +| `vmselect.podManagementPolicy` | Pod management policy for VictoriaMetrics Storage statefulset | `Parallel` | +| `vmselect.annotations` | Annotations for VictoriaMetrics Select Deployment or StatefulSet | `{}` | +| `vmselect.podSecurityContext.enabled` | Enabled VictoriaMetrics Select pods' Security Context | `true` | +| `vmselect.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `vmselect.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `vmselect.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `vmselect.podSecurityContext.fsGroup` | Set VictoriaMetrics Select pod's Security Context fsGroup | `1001` | +| `vmselect.containerSecurityContext.enabled` | Enabled VictoriaMetrics Select containers' Security Context | `true` | +| `vmselect.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| 
`vmselect.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `vmselect.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `vmselect.containerSecurityContext.runAsNonRoot` | Set VictoriaMetrics Select containers' Security Context runAsNonRoot | `true` | +| `vmselect.containerSecurityContext.privileged` | Set VictoriaMetrics Select containers' Security Context privileged | `false` | +| `vmselect.containerSecurityContext.readOnlyRootFilesystem` | Set VictoriaMetrics Select containers' Security Context runAsNonRoot | `true` | +| `vmselect.containerSecurityContext.allowPrivilegeEscalation` | Set VictoriaMetrics Select container's privilege escalation | `false` | +| `vmselect.containerSecurityContext.capabilities.drop` | Set VictoriaMetrics Select container's Security Context runAsNonRoot | `["ALL"]` | +| `vmselect.containerSecurityContext.seccompProfile.type` | Set VictoriaMetrics Select container's Security Context seccomp profile | `RuntimeDefault` | +| `vmselect.command` | Override default container command (useful when using custom images) | `[]` | +| `vmselect.args` | Override default container args (useful when using custom images) | `[]` | +| `vmselect.extraArgs` | Add extra arguments to the default command | `[]` | +| `vmselect.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `vmselect.hostAliases` | VictoriaMetrics Select pods host aliases | `[]` | +| `vmselect.podLabels` | Extra labels for VictoriaMetrics Select pods | `{}` | +| `vmselect.podAnnotations` | Annotations for VictoriaMetrics Select pods | `{}` | +| `vmselect.podAffinityPreset` | Pod affinity preset. Ignored if `vmselect.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmselect.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `vmselect.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `vmselect.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `vmselect.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `vmselect.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | +| `vmselect.autoscaling.vpa.enabled` | Enable VPA for VictoriaMetrics Select pods | `false` | +| `vmselect.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `vmselect.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory | `[]` | +| `vmselect.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `vmselect.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `vmselect.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `vmselect.autoscaling.hpa.enabled` | Enable HPA for VictoriaMetrics Select pods | `false` | +| `vmselect.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `vmselect.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `vmselect.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `vmselect.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `vmselect.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `vmselect.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmselect.nodeAffinityPreset.key` | Node label key to match. Ignored if `vmselect.affinity` is set | `""` | +| `vmselect.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `vmselect.affinity` is set | `[]` | +| `vmselect.affinity` | Affinity for VictoriaMetrics Select pods assignment | `{}` | +| `vmselect.nodeSelector` | Node labels for VictoriaMetrics Select pods assignment | `{}` | +| `vmselect.tolerations` | Tolerations for VictoriaMetrics Select pods assignment | `[]` | +| `vmselect.updateStrategy.type` | VictoriaMetrics Select statefulset strategy type | `RollingUpdate` | +| `vmselect.priorityClassName` | VictoriaMetrics Select pods' priorityClassName | `""` | +| `vmselect.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `vmselect.schedulerName` | Name of the k8s scheduler (other than default) for VictoriaMetrics Select pods | `""` | +| `vmselect.terminationGracePeriodSeconds` | Seconds VictoriaMetrics Select pod needs to terminate gracefully | `""` | +| `vmselect.lifecycleHooks` | for the VictoriaMetrics Select container(s) to automate configuration before or after startup | `{}` | +| `vmselect.extraEnvVars` | Array with extra environment variables to add to VictoriaMetrics Select nodes | `[]` | +| `vmselect.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for VictoriaMetrics Select nodes | `""` | +| `vmselect.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for VictoriaMetrics Select nodes | `""` | +| `vmselect.extraVolumes` | Optionally specify extra list of additional volumes for the VictoriaMetrics Select pod(s) | `[]` | +| `vmselect.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Select container(s) | `[]` | +| `vmselect.sidecars` | Add additional sidecar containers to the VictoriaMetrics Select pod(s) | `[]` | +| `vmselect.initContainers` | Add additional init containers to the VictoriaMetrics Select pod(s) | `[]` | + +### VictoriaMetrics Select RBAC Parameters + +| Name | Description | Value | +| 
------------------------------------------------------ | ---------------------------------------------------------------- | ------- | +| `vmselect.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `vmselect.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `vmselect.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | +| `vmselect.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | + +### VictoriaMetrics Select Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `vmselect.service.type` | VictoriaMetrics Select service type | `ClusterIP` | +| `vmselect.service.ports.http` | VictoriaMetrics Select service http port | `8481` | +| `vmselect.service.nodePorts.http` | Node port for HTTP | `""` | +| `vmselect.service.clusterIP` | VictoriaMetrics Select service Cluster IP | `""` | +| `vmselect.service.loadBalancerIP` | VictoriaMetrics Select service Load Balancer IP | `""` | +| `vmselect.service.loadBalancerSourceRanges` | VictoriaMetrics Select service Load Balancer sources | `[]` | +| `vmselect.service.externalTrafficPolicy` | VictoriaMetrics Select service external traffic policy | `Cluster` | +| `vmselect.service.annotations` | Additional custom annotations for VictoriaMetrics Select service | `{}` | +| `vmselect.service.extraPorts` | Extra ports to expose in VictoriaMetrics Select service (normally used with the `sidecars` value) | `[]` | +| `vmselect.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `vmselect.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| 
`vmselect.service.headless.annotations` | Annotations for the headless service. | `{}` | +| `vmselect.ingress.enabled` | Enable ingress record generation for VictoriaMetrics Select | `false` | +| `vmselect.ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `vmselect.ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `vmselect.ingress.hostname` | Default host for the ingress record | `vmselect.local` | +| `vmselect.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `vmselect.ingress.path` | Default path for the ingress record | `/` | +| `vmselect.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `vmselect.ingress.tls` | Enable TLS configuration for the host defined at `vmselect.ingress.hostname` parameter | `false` | +| `vmselect.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `vmselect.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `vmselect.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `vmselect.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `vmselect.ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `vmselect.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `vmselect.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `vmselect.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `vmselect.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `vmselect.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `vmselect.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `vmselect.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `vmselect.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### VictoriaMetrics Select Metrics Parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | +| `vmselect.metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `vmselect.metrics.annotations` | Additional custom annotations for the service | `{}` | +| `vmselect.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `vmselect.metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `vmselect.metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | +| `vmselect.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `vmselect.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `vmselect.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `vmselect.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `vmselect.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `vmselect.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | +| `vmselect.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | +| `vmselect.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + +### VictoriaMetrics Insert Parameters + +| Name | Description | Value | +| ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| `vminsert.image.registry` | VictoriaMetrics Insert image registry | `REGISTRY_NAME` | +| `vminsert.image.repository` | VictoriaMetrics Insert image repository | `REPOSITORY_NAME/victoriametrics-vminsert` | +| `vminsert.image.digest` | VictoriaMetrics Insert image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `vminsert.image.pullPolicy` | VictoriaMetrics Insert image pull policy | `IfNotPresent` | +| `vminsert.image.pullSecrets` | VictoriaMetrics Insert image pull secrets | `[]` | +| `vminsert.replicaCount` | Number of VictoriaMetrics Insert replicas to deploy | `1` | +| `vminsert.containerPorts.http` | VictoriaMetrics Insert http container port | `8480` | +| `vminsert.livenessProbe.enabled` | Enable livenessProbe on VictoriaMetrics Insert containers | `true` | +| `vminsert.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `vminsert.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `vminsert.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `vminsert.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `vminsert.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `vminsert.readinessProbe.enabled` | Enable readinessProbe on VictoriaMetrics Insert containers | `true` | +| `vminsert.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `vminsert.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `vminsert.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `vminsert.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `vminsert.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `vminsert.startupProbe.enabled` | Enable startupProbe on VictoriaMetrics Insert containers | `false` | +| `vminsert.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `vminsert.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `vminsert.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| 
`vminsert.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `vminsert.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `vminsert.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `vminsert.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `vminsert.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `vminsert.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vminsert.resources is set (vminsert.resources is recommended for production). | `nano` | +| `vminsert.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `vminsert.deploymentAnnotations` | Annotations for VictoriaMetrics Insert Deployment | `{}` | +| `vminsert.podSecurityContext.enabled` | Enabled VictoriaMetrics Insert pods' Security Context | `true` | +| `vminsert.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `vminsert.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `vminsert.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `vminsert.podSecurityContext.fsGroup` | Set VictoriaMetrics Insert pod's Security Context fsGroup | `1001` | +| `vminsert.containerSecurityContext.enabled` | Enabled VictoriaMetrics Insert containers' Security Context | `true` | +| `vminsert.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `vminsert.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `vminsert.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `vminsert.containerSecurityContext.runAsNonRoot` | Set VictoriaMetrics 
Insert containers' Security Context runAsNonRoot | `true` | +| `vminsert.containerSecurityContext.privileged` | Set VictoriaMetrics Insert containers' Security Context privileged | `false` | +| `vminsert.containerSecurityContext.readOnlyRootFilesystem` | Set VictoriaMetrics Insert containers' Security Context readOnlyRootFilesystem | `true` | +| `vminsert.containerSecurityContext.allowPrivilegeEscalation` | Set VictoriaMetrics Insert container's privilege escalation | `false` | +| `vminsert.containerSecurityContext.capabilities.drop` | Set VictoriaMetrics Insert container's Security Context capabilities to be dropped | `["ALL"]` | +| `vminsert.containerSecurityContext.seccompProfile.type` | Set VictoriaMetrics Insert container's Security Context seccomp profile | `RuntimeDefault` | +| `vminsert.command` | Override default container command (useful when using custom images) | `[]` | +| `vminsert.args` | Override default container args (useful when using custom images) | `[]` | +| `vminsert.extraArgs` | Add extra arguments to the default command | `[]` | +| `vminsert.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `vminsert.hostAliases` | VictoriaMetrics Insert pods host aliases | `[]` | +| `vminsert.podLabels` | Extra labels for VictoriaMetrics Insert pods | `{}` | +| `vminsert.podAnnotations` | Annotations for VictoriaMetrics Insert pods | `{}` | +| `vminsert.podAffinityPreset` | Pod affinity preset. Ignored if `vminsert.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vminsert.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `vminsert.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `vminsert.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `vminsert.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `vminsert.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | +| `vminsert.autoscaling.vpa.enabled` | Enable VPA for VictoriaMetrics Insert pods | `false` | +| `vminsert.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `vminsert.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory | `[]` | +| `vminsert.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `vminsert.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `vminsert.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `vminsert.autoscaling.hpa.enabled` | Enable HPA for VictoriaMetrics Insert pods | `false` | +| `vminsert.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `vminsert.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `vminsert.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `vminsert.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `vminsert.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `vminsert.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vminsert.nodeAffinityPreset.key` | Node label key to match. Ignored if `vminsert.affinity` is set | `""` | +| `vminsert.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `vminsert.affinity` is set | `[]` | +| `vminsert.affinity` | Affinity for VictoriaMetrics Insert pods assignment | `{}` | +| `vminsert.nodeSelector` | Node labels for VictoriaMetrics Insert pods assignment | `{}` | +| `vminsert.tolerations` | Tolerations for VictoriaMetrics Insert pods assignment | `[]` | +| `vminsert.updateStrategy.type` | VictoriaMetrics Insert deployment strategy type | `RollingUpdate` | +| `vminsert.priorityClassName` | VictoriaMetrics Insert pods' priorityClassName | `""` | +| `vminsert.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `vminsert.schedulerName` | Name of the k8s scheduler (other than default) for VictoriaMetrics Insert pods | `""` | +| `vminsert.terminationGracePeriodSeconds` | Seconds VictoriaMetrics Insert pod needs to terminate gracefully | `""` | +| `vminsert.lifecycleHooks` | for the VictoriaMetrics Insert container(s) to automate configuration before or after startup | `{}` | +| `vminsert.extraEnvVars` | Array with extra environment variables to add to VictoriaMetrics Insert nodes | `[]` | +| `vminsert.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for VictoriaMetrics Insert nodes | `""` | +| `vminsert.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for VictoriaMetrics Insert nodes | `""` | +| `vminsert.extraVolumes` | Optionally specify extra list of additional volumes for the VictoriaMetrics Insert pod(s) | `[]` | +| `vminsert.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Insert container(s) | `[]` | +| `vminsert.sidecars` | Add additional sidecar containers to the VictoriaMetrics Insert pod(s) | `[]` | +| `vminsert.initContainers` | Add additional init containers to the VictoriaMetrics Insert pod(s) | `[]` | +| `vminsert.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` 
+| `vminsert.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `vminsert.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | +| `vminsert.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | + +### VictoriaMetrics Insert Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `vminsert.service.type` | VictoriaMetrics Insert service type | `ClusterIP` | +| `vminsert.service.ports.http` | VictoriaMetrics Insert service http port | `8480` | +| `vminsert.service.nodePorts.http` | Node port for HTTP | `""` | +| `vminsert.service.clusterIP` | VictoriaMetrics Insert service Cluster IP | `""` | +| `vminsert.service.loadBalancerIP` | VictoriaMetrics Insert service Load Balancer IP | `""` | +| `vminsert.service.loadBalancerSourceRanges` | VictoriaMetrics Insert service Load Balancer sources | `[]` | +| `vminsert.service.externalTrafficPolicy` | VictoriaMetrics Insert service external traffic policy | `Cluster` | +| `vminsert.service.annotations` | Additional custom annotations for VictoriaMetrics Insert service | `{}` | +| `vminsert.service.extraPorts` | Extra ports to expose in VictoriaMetrics Insert service (normally used with the `sidecars` value) | `[]` | +| `vminsert.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `vminsert.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `vminsert.ingress.enabled` | Enable ingress record generation for VictoriaMetrics Insert | `false` | +| `vminsert.ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `vminsert.ingress.apiVersion` | Force Ingress API version 
(automatically detected if not set) | `""` | +| `vminsert.ingress.hostname` | Default host for the ingress record | `vminsert.local` | +| `vminsert.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `vminsert.ingress.path` | Default path for the ingress record | `/` | +| `vminsert.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `vminsert.ingress.tls` | Enable TLS configuration for the host defined at `vminsert.ingress.hostname` parameter | `false` | +| `vminsert.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `vminsert.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `vminsert.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `vminsert.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `vminsert.ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `vminsert.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `vminsert.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `vminsert.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `vminsert.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `vminsert.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `vminsert.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `vminsert.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `vminsert.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### VictoriaMetrics Insert Metrics Parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | +| `vminsert.metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `vminsert.metrics.annotations` | Additional custom annotations for the service | `{}` | +| `vminsert.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `vminsert.metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `vminsert.metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | +| `vminsert.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `vminsert.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `vminsert.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `vminsert.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `vminsert.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `vminsert.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | +| `vminsert.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | +| `vminsert.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + +### VictoriaMetrics Storage Parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | +| `vmstorage.image.registry` | VictoriaMetrics Storage image registry | `REGISTRY_NAME` | +| `vmstorage.image.repository` | VictoriaMetrics Storage image repository | `REPOSITORY_NAME/victoriametrics-vmstorage` | +| `vmstorage.image.digest` | VictoriaMetrics Storage image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `vmstorage.image.pullPolicy` | VictoriaMetrics Storage image pull policy | `IfNotPresent` | +| `vmstorage.image.pullSecrets` | VictoriaMetrics Storage image pull secrets | `[]` | +| `vmstorage.replicaCount` | Number of VictoriaMetrics Storage replicas to deploy | `1` | +| `vmstorage.containerPorts.http` | VictoriaMetrics Storage http container port | `8482` | +| `vmstorage.containerPorts.vmselect` | VictoriaMetrics Storage vmselect container port | `8401` | +| `vmstorage.containerPorts.vminsert` | VictoriaMetrics Storage vminsert container port | `8400` | +| `vmstorage.livenessProbe.enabled` | Enable livenessProbe on VictoriaMetrics Storage containers | `true` | +| `vmstorage.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `vmstorage.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `vmstorage.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `vmstorage.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `vmstorage.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `vmstorage.readinessProbe.enabled` | Enable readinessProbe on VictoriaMetrics Storage containers | `true` | +| `vmstorage.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `vmstorage.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `vmstorage.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `vmstorage.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `vmstorage.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `vmstorage.startupProbe.enabled` | Enable startupProbe on VictoriaMetrics Storage containers | `false` | +| `vmstorage.startupProbe.initialDelaySeconds` | Initial delay 
seconds for startupProbe | `5` | +| `vmstorage.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `vmstorage.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `vmstorage.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `vmstorage.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `vmstorage.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `vmstorage.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `vmstorage.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `vmstorage.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmstorage.resources is set (vmstorage.resources is recommended for production). | `small` | +| `vmstorage.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `vmstorage.retentionPeriod` | Data retention period | `1` | +| `vmstorage.statefulsetAnnotations` | Annotations for VictoriaMetrics Storage statefulset | `{}` | +| `vmstorage.podSecurityContext.enabled` | Enabled VictoriaMetrics Storage pods' Security Context | `true` | +| `vmstorage.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `vmstorage.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `vmstorage.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `vmstorage.podSecurityContext.fsGroup` | Set VictoriaMetrics Storage pod's Security Context fsGroup | `1001` | +| `vmstorage.containerSecurityContext.enabled` | Enabled VictoriaMetrics Storage containers' Security Context | `true` | +| `vmstorage.containerSecurityContext.seLinuxOptions` | Set SELinux options in container 
| `{}` | +| `vmstorage.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `vmstorage.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `vmstorage.containerSecurityContext.runAsNonRoot` | Set VictoriaMetrics Storage containers' Security Context runAsNonRoot | `true` | +| `vmstorage.containerSecurityContext.privileged` | Set VictoriaMetrics Storage containers' Security Context privileged | `false` | +| `vmstorage.containerSecurityContext.readOnlyRootFilesystem` | Set VictoriaMetrics Storage containers' Security Context readOnlyRootFilesystem | `true` | +| `vmstorage.containerSecurityContext.allowPrivilegeEscalation` | Set VictoriaMetrics Storage container's privilege escalation | `false` | +| `vmstorage.containerSecurityContext.capabilities.drop` | Set VictoriaMetrics Storage container's Security Context capabilities to be dropped | `["ALL"]` | +| `vmstorage.containerSecurityContext.seccompProfile.type` | Set VictoriaMetrics Storage container's Security Context seccomp profile | `RuntimeDefault` | +| `vmstorage.command` | Override default container command (useful when using custom images) | `[]` | +| `vmstorage.args` | Override default container args (useful when using custom images) | `[]` | +| `vmstorage.extraArgs` | Add extra arguments to the default command | `[]` | +| `vmstorage.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `vmstorage.hostAliases` | VictoriaMetrics Storage pods host aliases | `[]` | +| `vmstorage.podLabels` | Extra labels for VictoriaMetrics Storage pods | `{}` | +| `vmstorage.podAnnotations` | Annotations for VictoriaMetrics Storage pods | `{}` | +| `vmstorage.podAffinityPreset` | Pod affinity preset. Ignored if `vmstorage.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmstorage.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `vmstorage.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `vmstorage.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `vmstorage.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `vmstorage.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | +| `vmstorage.autoscaling.vpa.enabled` | Enable VPA for VictoriaMetrics Storage pods | `false` | +| `vmstorage.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `vmstorage.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory | `[]` | +| `vmstorage.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `vmstorage.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `vmstorage.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `vmstorage.autoscaling.hpa.enabled` | Enable HPA for VictoriaMetrics Storage pods | `false` | +| `vmstorage.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `vmstorage.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `vmstorage.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `vmstorage.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `vmstorage.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `vmstorage.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmstorage.nodeAffinityPreset.key` | Node label key to match. Ignored if `vmstorage.affinity` is set | `""` | +| `vmstorage.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `vmstorage.affinity` is set | `[]` | +| `vmstorage.affinity` | Affinity for VictoriaMetrics Storage pods assignment | `{}` | +| `vmstorage.nodeSelector` | Node labels for VictoriaMetrics Storage pods assignment | `{}` | +| `vmstorage.podManagementPolicy` | Pod management policy for VictoriaMetrics Storage statefulset | `Parallel` | +| `vmstorage.tolerations` | Tolerations for VictoriaMetrics Storage pods assignment | `[]` | +| `vmstorage.updateStrategy.type` | VictoriaMetrics Storage statefulset strategy type | `RollingUpdate` | +| `vmstorage.priorityClassName` | VictoriaMetrics Storage pods' priorityClassName | `""` | +| `vmstorage.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `vmstorage.schedulerName` | Name of the k8s scheduler (other than default) for VictoriaMetrics Storage pods | `""` | +| `vmstorage.terminationGracePeriodSeconds` | Seconds VictoriaMetrics Storage pod needs to terminate gracefully | `""` | +| `vmstorage.lifecycleHooks` | for the VictoriaMetrics Storage container(s) to automate configuration before or after startup | `{}` | +| `vmstorage.extraEnvVars` | Array with extra environment variables to add to VictoriaMetrics Storage nodes | `[]` | +| `vmstorage.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for VictoriaMetrics Storage nodes | `""` | +| `vmstorage.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for VictoriaMetrics Storage nodes | `""` | +| `vmstorage.extraVolumes` | Optionally specify extra list of additional volumes for the VictoriaMetrics Storage pod(s) | `[]` | +| `vmstorage.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Storage container(s) | `[]` | +| `vmstorage.sidecars` | Add additional sidecar containers to the VictoriaMetrics Storage pod(s) | `[]` | +| `vmstorage.initContainers` | Add additional init containers to 
the VictoriaMetrics Storage pod(s) | `[]` | +| `vmstorage.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `vmstorage.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `vmstorage.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | +| `vmstorage.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | + +### VictoriaMetrics Storage Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------- | -------------------------------------------------------------------------------------------------- | ----------- | +| `vmstorage.service.type` | VictoriaMetrics Storage service type | `ClusterIP` | +| `vmstorage.service.ports.http` | VictoriaMetrics Storage service http port | `80` | +| `vmstorage.service.nodePorts.http` | Node port for HTTP | `""` | +| `vmstorage.service.clusterIP` | VictoriaMetrics Storage service Cluster IP | `""` | +| `vmstorage.service.loadBalancerIP` | VictoriaMetrics Storage service Load Balancer IP | `""` | +| `vmstorage.service.loadBalancerSourceRanges` | VictoriaMetrics Storage service Load Balancer sources | `[]` | +| `vmstorage.service.externalTrafficPolicy` | VictoriaMetrics Storage service external traffic policy | `Cluster` | +| `vmstorage.service.annotations` | Additional custom annotations for VictoriaMetrics Storage service | `{}` | +| `vmstorage.service.extraPorts` | Extra ports to expose in VictoriaMetrics Storage service (normally used with the `sidecars` value) | `[]` | +| `vmstorage.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `vmstorage.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `vmstorage.service.headless.annotations` | Annotations for the headless service. 
| `{}` | +| `vmstorage.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `vmstorage.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `vmstorage.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `vmstorage.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `vmstorage.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `vmstorage.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `vmstorage.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### VictoriaMetrics Storage Persistence Parameters + +| Name | Description | Value | +| ------------------------------------------------- | --------------------------------------------------------------------------------------- | ------- | +| `vmstorage.persistence.enabled` | Enable persistence using Persistent Volume Claims | `true` | +| `vmstorage.persistence.mountPath` | Persistent Volume mount root path | `/data` | +| `vmstorage.persistence.storageClass` | Persistent Volume storage class | `""` | +| `vmstorage.persistence.accessModes` | Persistent Volume access modes | `[]` | +| `vmstorage.persistence.size` | Persistent Volume size | `10Gi` | +| `vmstorage.persistence.dataSource` | Custom PVC data source | `{}` | +| `vmstorage.persistence.annotations` | Annotations for the PVC | `{}` | +| `vmstorage.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `vmstorage.persistence.existingClaim` | The name of an existing PVC to use for persistence | `""` | +| `vmstorage.persistence.extraVolumeClaimTemplates` | Add additional VolumeClaimTemplates for enabling any plugins or any other purpose | `[]` | + +### VictoriaMetrics Storage Metrics 
Parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | +| `vmstorage.metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `vmstorage.metrics.annotations` | Additional custom annotations for the service | `{}` | +| `vmstorage.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `vmstorage.metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `vmstorage.metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | +| `vmstorage.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `vmstorage.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `vmstorage.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `vmstorage.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `vmstorage.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `vmstorage.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | +| `vmstorage.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | +| `vmstorage.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + +### VictoriaMetrics Auth Parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------- | +| `vmauth.enabled` | Enable VictoriaMetrics Auth | `true` | +| `vmauth.image.registry` | VictoriaMetrics Auth image registry | `REGISTRY_NAME` | +| `vmauth.image.repository` | VictoriaMetrics Auth image repository | `REPOSITORY_NAME/victoriametrics-vmauth` | +| `vmauth.image.digest` | VictoriaMetrics Auth image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `vmauth.image.pullPolicy` | VictoriaMetrics Auth image pull policy | `IfNotPresent` | +| `vmauth.image.pullSecrets` | VictoriaMetrics Auth image pull secrets | `[]` | +| `vmauth.replicaCount` | Number of VictoriaMetrics Auth replicas to deploy | `1` | +| `vmauth.containerPorts.http` | VictoriaMetrics Auth http container port | `8427` | +| `vmauth.livenessProbe.enabled` | Enable livenessProbe on VictoriaMetrics Auth containers | `true` | +| `vmauth.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `vmauth.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `vmauth.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `vmauth.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `vmauth.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `vmauth.readinessProbe.enabled` | Enable readinessProbe on VictoriaMetrics Auth containers | `true` | +| `vmauth.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `vmauth.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `vmauth.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `vmauth.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `vmauth.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `vmauth.startupProbe.enabled` | Enable startupProbe on VictoriaMetrics Auth containers | `false` | +| `vmauth.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `vmauth.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `vmauth.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `vmauth.startupProbe.failureThreshold` | Failure threshold for startupProbe | 
`5` | +| `vmauth.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `vmauth.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `vmauth.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `vmauth.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `vmauth.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmauth.resources is set (vmauth.resources is recommended for production). | `nano` | +| `vmauth.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `vmauth.kind` | Define how to deploy VictoriaMetrics Auth (allowed values: deployment or daemonset) | `deployment` | +| `vmauth.annotations` | Annotations for VictoriaMetrics Auth Deployment or StatefulSet | `{}` | +| `vmauth.podSecurityContext.enabled` | Enabled VictoriaMetrics Auth pods' Security Context | `true` | +| `vmauth.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `vmauth.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `vmauth.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `vmauth.podSecurityContext.fsGroup` | Set VictoriaMetrics Auth pod's Security Context fsGroup | `1001` | +| `vmauth.containerSecurityContext.enabled` | Enabled VictoriaMetrics Auth containers' Security Context | `true` | +| `vmauth.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `vmauth.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `vmauth.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `vmauth.containerSecurityContext.runAsNonRoot` | Set VictoriaMetrics 
Auth containers' Security Context runAsNonRoot | `true` | +| `vmauth.containerSecurityContext.privileged` | Set VictoriaMetrics Auth containers' Security Context privileged | `false` | +| `vmauth.containerSecurityContext.readOnlyRootFilesystem` | Set VictoriaMetrics Auth containers' Security Context readOnlyRootFilesystem | `true` | +| `vmauth.containerSecurityContext.allowPrivilegeEscalation` | Set VictoriaMetrics Auth container's privilege escalation | `false` | +| `vmauth.containerSecurityContext.capabilities.drop` | Set VictoriaMetrics Auth container's Security Context capabilities to drop | `["ALL"]` | +| `vmauth.containerSecurityContext.seccompProfile.type` | Set VictoriaMetrics Auth container's Security Context seccomp profile | `RuntimeDefault` | +| `vmauth.command` | Override default container command (useful when using custom images) | `[]` | +| `vmauth.args` | Override default container args (useful when using custom images) | `[]` | +| `vmauth.extraArgs` | Add extra arguments to the default command | `[]` | +| `vmauth.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `vmauth.hostAliases` | VictoriaMetrics Auth pods host aliases | `[]` | +| `vmauth.podLabels` | Extra labels for VictoriaMetrics Auth pods | `{}` | +| `vmauth.podAnnotations` | Annotations for VictoriaMetrics Auth pods | `{}` | +| `vmauth.podAffinityPreset` | Pod affinity preset. Ignored if `vmauth.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmauth.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `vmauth.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `vmauth.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `vmauth.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `vmauth.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | +| `vmauth.configOverrides` | Overwrite or add extra configuration options to the chart default | `{}` | +| `vmauth.existingSecret` | The name of an existing Secret with configuration | `""` | +| `vmauth.autoscaling.vpa.enabled` | Enable VPA for VictoriaMetrics Auth pods | `false` | +| `vmauth.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `vmauth.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory | `[]` | +| `vmauth.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `vmauth.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `vmauth.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `vmauth.autoscaling.hpa.enabled` | Enable HPA for VictoriaMetrics Auth pods | `false` | +| `vmauth.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `vmauth.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `vmauth.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `vmauth.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `vmauth.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `vmauth.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmauth.nodeAffinityPreset.key` | Node label key to match. Ignored if `vmauth.affinity` is set | `""` | +| `vmauth.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `vmauth.affinity` is set | `[]` | +| `vmauth.affinity` | Affinity for VictoriaMetrics Auth pods assignment | `{}` | +| `vmauth.nodeSelector` | Node labels for VictoriaMetrics Auth pods assignment | `{}` | +| `vmauth.tolerations` | Tolerations for VictoriaMetrics Auth pods assignment | `[]` | +| `vmauth.updateStrategy.type` | VictoriaMetrics Auth statefulset strategy type | `RollingUpdate` | +| `vmauth.priorityClassName` | VictoriaMetrics Auth pods' priorityClassName | `""` | +| `vmauth.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `vmauth.schedulerName` | Name of the k8s scheduler (other than default) for VictoriaMetrics Auth pods | `""` | +| `vmauth.terminationGracePeriodSeconds` | Seconds VictoriaMetrics Auth pod needs to terminate gracefully | `""` | +| `vmauth.lifecycleHooks` | for the VictoriaMetrics Auth container(s) to automate configuration before or after startup | `{}` | +| `vmauth.extraEnvVars` | Array with extra environment variables to add to VictoriaMetrics Auth nodes | `[]` | +| `vmauth.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for VictoriaMetrics Auth nodes | `""` | +| `vmauth.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for VictoriaMetrics Auth nodes | `""` | +| `vmauth.extraVolumes` | Optionally specify extra list of additional volumes for the VictoriaMetrics Auth pod(s) | `[]` | +| `vmauth.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Auth container(s) | `[]` | +| `vmauth.sidecars` | Add additional sidecar containers to the VictoriaMetrics Auth pod(s) | `[]` | +| `vmauth.initContainers` | Add additional init containers to the VictoriaMetrics Auth pod(s) | `[]` | + +### VictoriaMetrics Auth RBAC Parameters + +| Name | Description | Value | +| ---------------------------------------------------- | 
---------------------------------------------------------------- | ------- | +| `vmauth.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `vmauth.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `vmauth.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | +| `vmauth.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | + +### VictoriaMetrics Auth Traffic Exposure Parameters + +| Name | Description | Value | +| ---------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `vmauth.service.type` | VictoriaMetrics Auth service type | `ClusterIP` | +| `vmauth.service.ports.http` | VictoriaMetrics Auth service http port | `8427` | +| `vmauth.service.nodePorts.http` | Node port for HTTP | `""` | +| `vmauth.service.clusterIP` | VictoriaMetrics Auth service Cluster IP | `""` | +| `vmauth.service.loadBalancerIP` | VictoriaMetrics Auth service Load Balancer IP | `""` | +| `vmauth.service.loadBalancerSourceRanges` | VictoriaMetrics Auth service Load Balancer sources | `[]` | +| `vmauth.service.externalTrafficPolicy` | VictoriaMetrics Auth service external traffic policy | `Cluster` | +| `vmauth.service.annotations` | Additional custom annotations for VictoriaMetrics Auth service | `{}` | +| `vmauth.service.extraPorts` | Extra ports to expose in VictoriaMetrics Auth service (normally used with the `sidecars` value) | `[]` | +| `vmauth.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `vmauth.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `vmauth.ingress.enabled` | Enable ingress record generation for VictoriaMetrics Auth | `false` | +| 
`vmauth.ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `vmauth.ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `vmauth.ingress.hostname` | Default host for the ingress record | `vmauth.local` | +| `vmauth.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `vmauth.ingress.path` | Default path for the ingress record | `/` | +| `vmauth.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `vmauth.ingress.tls` | Enable TLS configuration for the host defined at `vmauth.ingress.hostname` parameter | `false` | +| `vmauth.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `vmauth.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `vmauth.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `vmauth.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `vmauth.ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `vmauth.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `vmauth.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `vmauth.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `vmauth.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `vmauth.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `vmauth.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `vmauth.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `vmauth.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### VictoriaMetrics Auth Metrics Parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | +| `vmauth.metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `vmauth.metrics.annotations` | Additional custom annotations for the service | `{}` | +| `vmauth.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `vmauth.metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `vmauth.metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | +| `vmauth.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `vmauth.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `vmauth.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `vmauth.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `vmauth.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `vmauth.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | +| `vmauth.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | +| `vmauth.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + +### VictoriaMetrics Agent Parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- | +| `vmagent.enabled` | Enable VictoriaMetrics Agent | `true` | +| `vmagent.image.registry` | VictoriaMetrics Agent image registry | `REGISTRY_NAME` | +| `vmagent.image.repository` | VictoriaMetrics Agent image repository | `REPOSITORY_NAME/victoriametrics-vmagent` | +| `vmagent.image.digest` | VictoriaMetrics Agent image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `vmagent.image.pullPolicy` | VictoriaMetrics Agent image pull policy | `IfNotPresent` | +| `vmagent.image.pullSecrets` | VictoriaMetrics Agent image pull secrets | `[]` | +| `vmagent.replicaCount` | Number of VictoriaMetrics Agent replicas to deploy | `1` | +| `vmagent.containerPorts.http` | VictoriaMetrics Agent http container port | `8429` | +| `vmagent.containerPorts.graphite` | VictoriaMetrics Agent graphite container port | `2003` | +| `vmagent.containerPorts.opentsdb` | VictoriaMetrics Agent opentsdb container port | `4242` | +| `vmagent.containerPorts.influxdb` | VictoriaMetrics Agent influxdb container port | `8089` | +| `vmagent.livenessProbe.enabled` | Enable livenessProbe on VictoriaMetrics Agent containers | `true` | +| `vmagent.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `vmagent.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `vmagent.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `vmagent.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `vmagent.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `vmagent.readinessProbe.enabled` | Enable readinessProbe on VictoriaMetrics Agent containers | `true` | +| `vmagent.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `vmagent.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `vmagent.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `vmagent.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `vmagent.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `vmagent.startupProbe.enabled` | Enable startupProbe on VictoriaMetrics Agent containers | `false` | +| 
`vmagent.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `vmagent.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `vmagent.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `vmagent.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `vmagent.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `vmagent.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `vmagent.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `vmagent.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `vmagent.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmagent.resources is set (vmagent.resources is recommended for production). | `nano` | +| `vmagent.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `vmagent.kind` | Define how to deploy VictoriaMetrics Agent (allowed values: deployment or daemonset) | `deployment` | +| `vmagent.annotations` | Annotations for VictoriaMetrics Agent Deployment or StatefulSet | `{}` | +| `vmagent.podSecurityContext.enabled` | Enabled VictoriaMetrics Agent pods' Security Context | `true` | +| `vmagent.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `vmagent.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `vmagent.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `vmagent.podSecurityContext.fsGroup` | Set VictoriaMetrics Agent pod's Security Context fsGroup | `1001` | +| `vmagent.containerSecurityContext.enabled` | Enabled VictoriaMetrics Agent containers' Security Context | `true` | +| 
`vmagent.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `vmagent.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `vmagent.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `vmagent.containerSecurityContext.runAsNonRoot` | Set VictoriaMetrics Agent containers' Security Context runAsNonRoot | `true` | +| `vmagent.containerSecurityContext.privileged` | Set VictoriaMetrics Agent containers' Security Context privileged | `false` | +| `vmagent.containerSecurityContext.readOnlyRootFilesystem` | Set VictoriaMetrics Agent containers' Security Context readOnlyRootFilesystem | `true` | +| `vmagent.containerSecurityContext.allowPrivilegeEscalation` | Set VictoriaMetrics Agent container's privilege escalation | `false` | +| `vmagent.containerSecurityContext.capabilities.drop` | Set VictoriaMetrics Agent container's Security Context capabilities to drop | `["ALL"]` | +| `vmagent.containerSecurityContext.seccompProfile.type` | Set VictoriaMetrics Agent container's Security Context seccomp profile | `RuntimeDefault` | +| `vmagent.command` | Override default container command (useful when using custom images) | `[]` | +| `vmagent.args` | Override default container args (useful when using custom images) | `[]` | +| `vmagent.extraArgs` | Add extra arguments to the default command | `[]` | +| `vmagent.automountServiceAccountToken` | Mount Service Account token in pod | `true` | +| `vmagent.hostAliases` | VictoriaMetrics Agent pods host aliases | `[]` | +| `vmagent.podLabels` | Extra labels for VictoriaMetrics Agent pods | `{}` | +| `vmagent.podAnnotations` | Annotations for VictoriaMetrics Agent pods | `{}` | +| `vmagent.podAffinityPreset` | Pod affinity preset. Ignored if `vmagent.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmagent.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `vmagent.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `vmagent.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `vmagent.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `vmagent.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | +| `vmagent.scrapeConfigOverrides` | Overwrite or add extra scraping configuration options to the chart default | `{}` | +| `vmagent.enableListeners.influxdb` | Enable influxdb listener | `false` | +| `vmagent.enableListeners.opentsdb` | Enable opentsdb listener | `false` | +| `vmagent.enableListeners.graphite` | Enable graphite listener | `false` | +| `vmagent.existingScrapeConfigMap` | The name of an existing ConfigMap with the scrape configuration | `""` | +| `vmagent.namespaced` | Only scrape in the deployed namespace | `false` | +| `vmagent.allowedMetricsEndpoints` | Allowed metrics endpoints to scrape (when not namespaced) | `[]` | +| `vmagent.autoscaling.vpa.enabled` | Enable VPA for VictoriaMetrics Agent pods | `false` | +| `vmagent.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `vmagent.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `vmagent.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `vmagent.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `vmagent.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `vmagent.autoscaling.hpa.enabled` | Enable HPA for VictoriaMetrics Agent pods | `false` | +| `vmagent.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `vmagent.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `vmagent.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `vmagent.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `vmagent.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `vmagent.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmagent.nodeAffinityPreset.key` | Node label key to match. Ignored if `vmagent.affinity` is set | `""` | +| `vmagent.nodeAffinityPreset.values` | Node label values to match. Ignored if `vmagent.affinity` is set | `[]` | +| `vmagent.affinity` | Affinity for VictoriaMetrics Agent pods assignment | `{}` | +| `vmagent.nodeSelector` | Node labels for VictoriaMetrics Agent pods assignment | `{}` | +| `vmagent.tolerations` | Tolerations for VictoriaMetrics Agent pods assignment | `[]` | +| `vmagent.updateStrategy.type` | VictoriaMetrics Agent statefulset strategy type | `RollingUpdate` | +| `vmagent.priorityClassName` | VictoriaMetrics Agent pods' priorityClassName | `""` | +| `vmagent.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `vmagent.schedulerName` | Name of the k8s scheduler (other than default) for VictoriaMetrics Agent pods | `""` | +| `vmagent.terminationGracePeriodSeconds` | Seconds VictoriaMetrics Agent pod needs to terminate gracefully | `""` | +| `vmagent.lifecycleHooks` | for the VictoriaMetrics Agent container(s) to automate configuration before or after startup | `{}` | +| `vmagent.extraEnvVars` | Array with extra environment variables to add to VictoriaMetrics Agent nodes | `[]` | +| `vmagent.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for VictoriaMetrics Agent nodes | `""` | +| `vmagent.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for VictoriaMetrics Agent nodes | `""` | +| `vmagent.extraVolumes` | Optionally specify extra list of additional volumes for the VictoriaMetrics Agent pod(s) | `[]` | +| `vmagent.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Agent container(s) | `[]` | +| `vmagent.sidecars` | Add additional sidecar containers to the VictoriaMetrics Agent pod(s) | `[]` | +| `vmagent.initContainers` | Add additional init containers to the VictoriaMetrics Agent pod(s) | `[]` | + +### VictoriaMetrics Agent RBAC Parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ---------------------------------------------------------------- | ------- | +| `vmagent.rbac.create` | Specifies whether RBAC resources should be created | `true` | +| `vmagent.rbac.rules` | Custom RBAC rules to set | `[]` | +| `vmagent.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `vmagent.serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `vmagent.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | +| `vmagent.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | + +### VictoriaMetrics Agent Traffic Exposure Parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------ | ----------- | +| `vmagent.service.type` | VictoriaMetrics Agent service type | `ClusterIP` | +| `vmagent.service.ports.http` | VictoriaMetrics Agent service http port | `8429` | +| `vmagent.service.ports.graphite` | VictoriaMetrics Agent service graphite port | `2003` | +| `vmagent.service.ports.opentsdb` | VictoriaMetrics Agent service opentsdb port | `4242` | +| `vmagent.service.ports.influxdb` | VictoriaMetrics Agent service influxdb port | `8089` | +| `vmagent.service.nodePorts.http` | Node port for HTTP | `""` | +| `vmagent.service.nodePorts.graphite` | Node port for graphite | `""` | +| `vmagent.service.nodePorts.opentsdb` | Node port for opentsdb | `""` | +| `vmagent.service.nodePorts.influxdb` | Node port for influxdb | `""` | +| `vmagent.service.clusterIP` | VictoriaMetrics Agent service Cluster IP | `""` | +| `vmagent.service.loadBalancerIP` | VictoriaMetrics Agent service Load Balancer IP | `""` | +| `vmagent.service.loadBalancerSourceRanges` | VictoriaMetrics Agent service Load Balancer sources | `[]` | +| `vmagent.service.externalTrafficPolicy` | VictoriaMetrics Agent service external traffic policy | `Cluster` | +| `vmagent.service.annotations` | Additional custom annotations for VictoriaMetrics Agent service | `{}` | +| `vmagent.service.extraPorts` | Extra ports to expose in VictoriaMetrics Agent service (normally used with the `sidecars` value) | `[]` | +| `vmagent.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | 
`None` | +| `vmagent.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `vmagent.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `vmagent.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `vmagent.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `vmagent.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `vmagent.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `vmagent.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `vmagent.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### VictoriaMetrics Agent Metrics Parameters + +| Name | Description | Value | +| -------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | +| `vmagent.metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `vmagent.metrics.annotations` | Additional custom annotations for the service | `{}` | +| `vmagent.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `vmagent.metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `vmagent.metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | +| `vmagent.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `vmagent.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `vmagent.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target 
labels | `false` | +| `vmagent.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `vmagent.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `vmagent.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | +| `vmagent.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | +| `vmagent.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + +### VictoriaMetrics Alert Parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- | +| `vmalert.enabled` | Enable VictoriaMetrics Alert | `false` | +| `vmalert.image.registry` | VictoriaMetrics Alert image registry | `REGISTRY_NAME` | +| `vmalert.image.repository` | VictoriaMetrics Alert image repository | `REPOSITORY_NAME/victoriametrics-vmalert` | +| `vmalert.image.digest` | VictoriaMetrics Alert image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `vmalert.image.pullPolicy` | VictoriaMetrics Alert image pull policy | `IfNotPresent` | +| `vmalert.image.pullSecrets` | VictoriaMetrics Alert image pull secrets | `[]` | +| `vmalert.replicaCount` | Number of VictoriaMetrics Alert replicas to deploy | `1` | +| `vmalert.containerPorts.http` | VictoriaMetrics Alert http container port | `8429` | +| `vmalert.livenessProbe.enabled` | Enable livenessProbe on VictoriaMetrics Alert containers | `true` | +| `vmalert.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `vmalert.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `vmalert.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `vmalert.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `vmalert.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `vmalert.readinessProbe.enabled` | Enable readinessProbe on VictoriaMetrics Alert containers | `true` | +| `vmalert.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `vmalert.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `vmalert.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `vmalert.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `vmalert.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `vmalert.startupProbe.enabled` | Enable startupProbe on VictoriaMetrics Alert containers | `false` | +| `vmalert.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `vmalert.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `vmalert.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `vmalert.startupProbe.failureThreshold` | Failure 
threshold for startupProbe | `5` | +| `vmalert.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `vmalert.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `vmalert.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `vmalert.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `vmalert.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmalert.resources is set (vmalert.resources is recommended for production). | `nano` | +| `vmalert.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `vmalert.datasourceUrl` | URL to an external datasource (uses VictoriaMetrics Insert if not set) | `""` | +| `vmalert.notifierUrl` | URL to a notifier like AlertManager (necessary when alert rules are set) | `""` | +| `vmalert.deploymentAnnotations` | Annotations for VictoriaMetrics Alert Deployment | `{}` | +| `vmalert.podSecurityContext.enabled` | Enabled VictoriaMetrics Alert pods' Security Context | `true` | +| `vmalert.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `vmalert.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `vmalert.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `vmalert.podSecurityContext.fsGroup` | Set VictoriaMetrics Alert pod's Security Context fsGroup | `1001` | +| `vmalert.containerSecurityContext.enabled` | Enabled VictoriaMetrics Alert containers' Security Context | `true` | +| `vmalert.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `vmalert.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| 
`vmalert.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `vmalert.containerSecurityContext.runAsNonRoot` | Set VictoriaMetrics Alert containers' Security Context runAsNonRoot | `true` | +| `vmalert.containerSecurityContext.privileged` | Set VictoriaMetrics Alert containers' Security Context privileged | `false` | +| `vmalert.containerSecurityContext.readOnlyRootFilesystem` | Set VictoriaMetrics Alert containers' Security Context readOnlyRootFilesystem | `true` | +| `vmalert.containerSecurityContext.allowPrivilegeEscalation` | Set VictoriaMetrics Alert container's privilege escalation | `false` | +| `vmalert.containerSecurityContext.capabilities.drop` | Set VictoriaMetrics Alert container's Security Context capabilities to drop | `["ALL"]` | +| `vmalert.containerSecurityContext.seccompProfile.type` | Set VictoriaMetrics Alert container's Security Context seccomp profile | `RuntimeDefault` | +| `vmalert.command` | Override default container command (useful when using custom images) | `[]` | +| `vmalert.args` | Override default container args (useful when using custom images) | `[]` | +| `vmalert.extraArgs` | Add extra arguments to the default command | `[]` | +| `vmalert.automountServiceAccountToken` | Mount Service Account token in pod | `true` | +| `vmalert.hostAliases` | VictoriaMetrics Alert pods host aliases | `[]` | +| `vmalert.podLabels` | Extra labels for VictoriaMetrics Alert pods | `{}` | +| `vmalert.podAnnotations` | Annotations for VictoriaMetrics Alert pods | `{}` | +| `vmalert.podAffinityPreset` | Pod affinity preset. Ignored if `vmalert.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmalert.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `vmalert.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `vmalert.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `vmalert.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `vmalert.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | +| `vmalert.rulesConfigOverrides` | Overwrite or add extra rules configuration options to the chart default | `{}` | +| `vmalert.existingRulesConfigMap` | The name of an existing ConfigMap with the rules configuration | `""` | +| `vmalert.autoscaling.vpa.enabled` | Enable VPA for VictoriaMetrics Alert pods | `false` | +| `vmalert.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `vmalert.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory | `[]` | +| `vmalert.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `vmalert.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `vmalert.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `vmalert.autoscaling.hpa.enabled` | Enable HPA for VictoriaMetrics Alert pods | `false` | +| `vmalert.autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `vmalert.autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `vmalert.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `vmalert.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `vmalert.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `vmalert.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `vmalert.nodeAffinityPreset.key` | Node label key to match. Ignored if `vmalert.affinity` is set | `""` | +| `vmalert.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `vmalert.affinity` is set | `[]` | +| `vmalert.affinity` | Affinity for VictoriaMetrics Alert pods assignment | `{}` | +| `vmalert.nodeSelector` | Node labels for VictoriaMetrics Alert pods assignment | `{}` | +| `vmalert.tolerations` | Tolerations for VictoriaMetrics Alert pods assignment | `[]` | +| `vmalert.updateStrategy.type` | VictoriaMetrics Alert statefulset strategy type | `RollingUpdate` | +| `vmalert.priorityClassName` | VictoriaMetrics Alert pods' priorityClassName | `""` | +| `vmalert.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `vmalert.schedulerName` | Name of the k8s scheduler (other than default) for VictoriaMetrics Alert pods | `""` | +| `vmalert.terminationGracePeriodSeconds` | Seconds VictoriaMetrics Alert pod needs to terminate gracefully | `""` | +| `vmalert.lifecycleHooks` | for the VictoriaMetrics Alert container(s) to automate configuration before or after startup | `{}` | +| `vmalert.extraEnvVars` | Array with extra environment variables to add to VictoriaMetrics Alert nodes | `[]` | +| `vmalert.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for VictoriaMetrics Alert nodes | `""` | +| `vmalert.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for VictoriaMetrics Alert nodes | `""` | +| `vmalert.extraVolumes` | Optionally specify extra list of additional volumes for the VictoriaMetrics Alert pod(s) | `[]` | +| `vmalert.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Alert container(s) | `[]` | +| `vmalert.sidecars` | Add additional sidecar containers to the VictoriaMetrics Alert pod(s) | `[]` | +| `vmalert.initContainers` | Add additional init containers to the VictoriaMetrics Alert pod(s) | `[]` | +| `vmalert.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `vmalert.serviceAccount.name` 
| The name of the ServiceAccount to use. | `""` | +| `vmalert.serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | +| `vmalert.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | + +### VictoriaMetrics Alert Traffic Exposure Parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------ | ----------- | +| `vmalert.service.type` | VictoriaMetrics Alert service type | `ClusterIP` | +| `vmalert.service.ports.http` | VictoriaMetrics Alert service http port | `8429` | +| `vmalert.service.nodePorts.http` | Node port for HTTP | `""` | +| `vmalert.service.clusterIP` | VictoriaMetrics Alert service Cluster IP | `""` | +| `vmalert.service.loadBalancerIP` | VictoriaMetrics Alert service Load Balancer IP | `""` | +| `vmalert.service.loadBalancerSourceRanges` | VictoriaMetrics Alert service Load Balancer sources | `[]` | +| `vmalert.service.externalTrafficPolicy` | VictoriaMetrics Alert service external traffic policy | `Cluster` | +| `vmalert.service.annotations` | Additional custom annotations for VictoriaMetrics Alert service | `{}` | +| `vmalert.service.extraPorts` | Extra ports to expose in VictoriaMetrics Alert service (normally used with the `sidecars` value) | `[]` | +| `vmalert.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `vmalert.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `vmalert.networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `vmalert.networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `vmalert.networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `vmalert.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `vmalert.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `vmalert.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `vmalert.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### VictoriaMetrics Alert Metrics Parameters + +| Name | Description | Value | +| -------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | +| `vmalert.metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `vmalert.metrics.annotations` | Additional custom annotations for the service | `{}` | +| `vmalert.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `vmalert.metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `vmalert.metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | +| `vmalert.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `vmalert.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `vmalert.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `vmalert.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `vmalert.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `vmalert.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | +| `vmalert.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | +| `vmalert.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + +### Default Init Containers Parameters + +| Name | Description | Value | +| --------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `defaultInitContainers.volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | +| `defaultInitContainers.volumePermissions.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` | +| `defaultInitContainers.volumePermissions.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` | +| `defaultInitContainers.volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `defaultInitContainers.volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | +| `defaultInitContainers.volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | +| `defaultInitContainers.volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `nano` | +| `defaultInitContainers.volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.enabled` | Enable securityContext in the init container | `true` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release --set vmagent.enableListeners.influxdb=true oci://REGISTRY_NAME/REPOSITORY_NAME/victoriametrics +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Drycc, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=Drycccharts`. + +The above command install VictoriaMetrics chart with the InfluxDB listener enabled in the `vmagent` component. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/victoriametrics +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Drycc, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=Drycccharts`. 
+> **Tip**: You can use the default [values.yaml](https://github.com/Drycc/charts/tree/main/Drycc/victoriametrics/values.yaml) + +## Troubleshooting + +Find more information about how to deal with common errors related to Drycc's Helm charts in [this troubleshooting guide](https://docs.Drycc.com/general/how-to/troubleshoot-helm-chart-issues). + +## License + +Copyright © 2025 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/NOTES.txt b/addons/victoriametrics/1/chart/victoriametrics/templates/NOTES.txt new file mode 100644 index 00000000..f555e028 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/NOTES.txt @@ -0,0 +1,168 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +Did you know there are enterprise versions of the Drycc catalog? For enhanced secure software supply chain features, unlimited pulls from Docker, LTS support, or application customization, see Drycc Premium or Tanzu Application Catalog. See https://www.arrow.com/globalecs/na/vendors/drycc for more information. 
+ +** Please be patient while the chart is being deployed ** + +The following controllers have been deployed: + + - vmselect + - vminsert + - vmstorage + {{- if .Values.vmauth.enabled }} + - vmauth + {{- end }} + {{- if .Values.vmagent.enabled }} + - vmagent + {{- end }} + {{- if .Values.vmalert.enabled }} + - vmalert + {{- end }} + +Check the status of the pods by running this command: + + kubectl get pods --namespace {{ include "common.names.namespace" . | quote }} -l app.kubernetes.io/instance={{ .Release.Name }} + +{{- if .Values.vmauth.enabled }} +Your VictoriaMetrics site can be accessed through the following DNS name from within your cluster: + + {{ include "victoriametrics.vmauth.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} (port {{ .Values.vmauth.service.ports.http }}) + +To access your VictoriaMetrics site from outside the cluster follow the steps below: + +{{- if .Values.vmauth.ingress.enabled }} + +Get the VictoriaMetrics URL and associate VictoriaMetrics hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + echo "VictoriaMetrics Select URL: http{{ if .Values.vmauth.ingress.tls }}s{{ end }}://{{ .Values.vmauth.ingress.hostname }}/select" + echo "VictoriaMetrics Insert URL: http{{ if .Values.vmauth.ingress.tls }}s{{ end }}://{{ .Values.vmauth.ingress.hostname }}/insert" + echo "$CLUSTER_IP {{ .Values.vmauth.ingress.hostname }}" | sudo tee -a /etc/hosts + +{{- else }} +{{- $port := .Values.vmauth.service.ports.http | toString }} + +Get the VictoriaMetrics URL by running these commands: + +{{- if contains "NodePort" .Values.vmauth.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "victoriametrics.vmauth.fullname" . 
}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "VictoriaMetrics Select URL: http://$NODE_IP:$NODE_PORT/select" + echo "VictoriaMetrics Insert URL: http://$NODE_IP:$NODE_PORT/insert" + +{{- else if contains "LoadBalancer" .Values.vmauth.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "victoriametrics.vmauth.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "victoriametrics.vmauth.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "VictoriaMetrics Select URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.vmauth.service.ports.http }}{{ end }}/select" + echo "VictoriaMetrics Insert URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.vmauth.service.ports.http }}{{ end }}/insert" + +{{- else if contains "ClusterIP" .Values.vmauth.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "victoriametrics.vmauth.fullname" . }} {{ .Values.vmauth.service.ports.http }}:{{ .Values.vmauth.service.ports.http }} & + echo "VictoriaMetrics Select URL: http://127.0.0.1{{- if ne $port "80" }}:{{ .Values.vmauth.service.ports.http }}{{ end }}/select" + echo "VictoriaMetrics Insert URL: http://127.0.0.1{{- if ne $port "80" }}:{{ .Values.vmauth.service.ports.http }}{{ end }}/insert" + +{{- end }} +{{- end }} +{{- else }} + +Your VictoriaMetrics Select site can be accessed through the following DNS name from within your cluster: + + {{ include "victoriametrics.vmselect.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} (port {{ .Values.vmselect.service.ports.http }}) + +To access your VictoriaMetrics site from outside the cluster follow the steps below: + +{{- if .Values.vmselect.ingress.enabled }} + +Get the VictoriaMetrics URL and associate VictoriaMetrics hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + echo "VictoriaMetrics Select URL: http{{ if .Values.vmselect.ingress.tls }}s{{ end }}://{{ .Values.vmselect.ingress.hostname }}/select" + echo "$CLUSTER_IP {{ .Values.vmselect.ingress.hostname }}" | sudo tee -a /etc/hosts + +{{- else }} +{{- $port := .Values.vmselect.service.ports.http | toString }} + +Get the VictoriaMetrics URL by running these commands: + +{{- if contains "NodePort" .Values.vmselect.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "victoriametrics.vmselect.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "VictoriaMetrics Select URL: http://$NODE_IP:$NODE_PORT/select" + +{{- else if contains "LoadBalancer" .Values.vmselect.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "victoriametrics.vmselect.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "victoriametrics.vmselect.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") + echo "VictoriaMetrics Select URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.vmselect.service.ports.http }}{{ end }}/select" + +{{- else if contains "ClusterIP" .Values.vmselect.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "victoriametrics.vmselect.fullname" . }} {{ .Values.vmselect.service.ports.http }}:{{ .Values.vmselect.service.ports.http }} & + echo "VictoriaMetrics Select URL: http://127.0.0.1{{- if ne $port "80" }}:{{ .Values.vmselect.service.ports.http }}{{ end }}/select" + +{{- end }} +{{- end }} + +Your VictoriaMetrics Insert site can be accessed through the following DNS name from within your cluster: + + {{ include "victoriametrics.vminsert.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} (port {{ .Values.vminsert.service.ports.http }}) + +To access your VictoriaMetrics site from outside the cluster follow the steps below: + +{{- if .Values.vminsert.ingress.enabled }} + +Get the VictoriaMetrics URL and associate VictoriaMetrics hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + echo "VictoriaMetrics Insert URL: http{{ if .Values.vminsert.ingress.tls }}s{{ end }}://{{ .Values.vminsert.ingress.hostname }}/insert" + echo "$CLUSTER_IP {{ .Values.vminsert.ingress.hostname }}" | sudo tee -a /etc/hosts + +{{- else }} +{{- $port := .Values.vminsert.service.ports.http | toString }} + +Get the VictoriaMetrics URL by running these commands: + +{{- if contains "NodePort" .Values.vminsert.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "victoriametrics.vminsert.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "VictoriaMetrics Insert URL: http://$NODE_IP:$NODE_PORT/insert" + +{{- else if contains "LoadBalancer" .Values.vminsert.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "victoriametrics.vminsert.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "victoriametrics.vminsert.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "VictoriaMetrics Insert URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.vminsert.service.ports.http }}{{ end }}/insert" + +{{- else if contains "ClusterIP" .Values.vminsert.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "victoriametrics.vminsert.fullname" . }} {{ .Values.vminsert.service.ports.http }}:{{ .Values.vminsert.service.ports.http }} & + echo "VictoriaMetrics Insert URL: http://127.0.0.1{{- if ne $port "80" }}:{{ .Values.vminsert.service.ports.http }}{{ end }}/insert" + +{{- end }} +{{- end }} + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.vmselect.image }} +{{- include "common.warnings.rollingTag" .Values.vminsert.image }} +{{- include "common.warnings.rollingTag" .Values.vmstorage.image }} +{{- include "common.warnings.rollingTag" .Values.vmauth.image }} +{{- include "common.warnings.rollingTag" .Values.vmagent.image }} +{{- include "common.warnings.rollingTag" .Values.vmalert.image }} +{{- include "victoriametrics.validateValues" . 
}} +{{- include "common.warnings.resources" (dict "sections" (list "vmselect" "vmstorage" "vminsert" "vmauth" "vmagent" "vmalert" "defaultInitContainers.volumePermissions") "context" $) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.vmselect.image .Values.vminsert.image .Values.vmstorage.image .Values.vmauth.image .Values.vmagent.image .Values.vmalert.image .Values.defaultInitContainers.volumePermissions.image) "context" $) }} +{{- include "common.errors.insecureImages" (dict "images" (list .Values.vmselect.image .Values.vminsert.image .Values.vmstorage.image .Values.vmauth.image .Values.vmagent.image .Values.vmalert.image .Values.defaultInitContainers.volumePermissions.image) "context" $) }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/_helpers.tpl b/addons/victoriametrics/1/chart/victoriametrics/templates/_helpers.tpl new file mode 100644 index 00000000..c2b9873a --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/_helpers.tpl @@ -0,0 +1,283 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "victoriametrics.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.vmselect.image .Values.vminsert.image .Values.vmstorage.image .Values.vmauth.image .Values.vmagent.image .Values.defaultInitContainers.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper victoriametrics.vmselect.fullname +*/}} +{{- define "victoriametrics.vmselect.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) 
"vmselect" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Select image name +*/}} +{{- define "victoriametrics.vmselect.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.vmselect.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create the name of the service account to use (VictoriaMetrics Select) +*/}} +{{- define "victoriametrics.vmselect.serviceAccountName" -}} +{{- if .Values.vmselect.serviceAccount.create -}} + {{ default (include "victoriametrics.vmselect.fullname" .) .Values.vmselect.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.vmselect.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Insert fullname +*/}} +{{- define "victoriametrics.vminsert.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "vminsert" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Insert image name +*/}} +{{- define "victoriametrics.vminsert.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.vminsert.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create the name of the service account to use (VictoriaMetrics Insert) +*/}} +{{- define "victoriametrics.vminsert.serviceAccountName" -}} +{{- if .Values.vminsert.serviceAccount.create -}} + {{ default (include "victoriametrics.vminsert.fullname" .) .Values.vminsert.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.vminsert.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Storage fullname +*/}} +{{- define "victoriametrics.vmstorage.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) 
"vmstorage" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Storage image name +*/}} +{{- define "victoriametrics.vmstorage.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.vmstorage.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create the name of the service account to use (VictoriaMetrics Storage) +*/}} +{{- define "victoriametrics.vmstorage.serviceAccountName" -}} +{{- if .Values.vmstorage.serviceAccount.create -}} + {{ default (include "victoriametrics.vmstorage.fullname" .) .Values.vmstorage.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.vmstorage.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Auth fullname +*/}} +{{- define "victoriametrics.vmauth.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "vmauth" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Auth image name +*/}} +{{- define "victoriametrics.vmauth.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.vmauth.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create the name of the service account to use (VictoriaMetrics Auth) +*/}} +{{- define "victoriametrics.vmauth.serviceAccountName" -}} +{{- if .Values.vmauth.serviceAccount.create -}} + {{ default (include "victoriametrics.vmauth.fullname" .) .Values.vmauth.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.vmauth.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Name of the VictoriaMetrics Auth Secret +*/}} +{{- define "victoriametrics.vmauth.secretName" -}} +{{- if .Values.vmauth.existingSecret -}} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.existingSecret "context" $) -}} +{{- else -}} + {{- include "victoriametrics.vmauth.fullname" . 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Agent fullname +*/}} +{{- define "victoriametrics.vmagent.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "vmagent" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Agent image name +*/}} +{{- define "victoriametrics.vmagent.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.vmagent.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create the name of the service account to use (VictoriaMetrics Agent) +*/}} +{{- define "victoriametrics.vmagent.serviceAccountName" -}} +{{- if .Values.vmagent.serviceAccount.create -}} + {{ default (include "victoriametrics.vmagent.fullname" .) .Values.vmagent.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.vmagent.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Name of the VictoriaMetrics Agent Secret +*/}} +{{- define "victoriametrics.vmagent.scrapeConfigMapName" -}} +{{- if .Values.vmagent.existingScrapeConfigMap -}} + {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.existingScrapeConfigMap "context" $) -}} +{{- else -}} + {{- include "victoriametrics.vmagent.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Alert fullname +*/}} +{{- define "victoriametrics.vmalert.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "vmalert" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper VictoriaMetrics Alert image name +*/}} +{{- define "victoriametrics.vmalert.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.vmalert.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create the name of the service account to use (VictoriaMetrics Alert) +*/}} +{{- define "victoriametrics.vmalert.serviceAccountName" -}} +{{- if .Values.vmalert.serviceAccount.create -}} + {{ default (include "victoriametrics.vmalert.fullname" .) 
.Values.vmalert.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.vmalert.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Name of the VictoriaMetrics Alert Secret +*/}} +{{- define "victoriametrics.vmalert.rulesConfigMapName" -}} +{{- if .Values.vmalert.existingRulesConfigMap -}} + {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.existingRulesConfigMap "context" $) -}} +{{- else -}} + {{- include "victoriametrics.vmalert.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "victoriametrics.volume-permissions.image" -}} +{{- include "common.images.image" ( dict "imageRoot" .Values.defaultInitContainers.volumePermissions.image "global" .Values.global ) -}} +{{- end -}} + +{{- define "victoriametrics.init-containers.volume-permissions" -}} +{{- /* As most Drycc charts have volumePermissions in the root, we add this overwrite to maintain a similar UX */}} +- name: volume-permissions + image: {{ include "victoriametrics.volume-permissions.image" . 
}} + imagePullPolicy: {{ .context.Values.defaultInitContainers.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .componentValues.persistence.mountPath }} + {{- else }} + chown -R {{ .componentValues.containerSecurityContext.runAsUser }}:{{ .componentValues.podSecurityContext.fsGroup }} {{ .componentValues.persistence.mountPath }} + {{- end }} + {{- if .Values.defaultInitContainers.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defaultInitContainers.volumePermissions.containerSecurityContext "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.defaultInitContainers.volumePermissions.resources }} + resources: {{- toYaml .Values.defaultInitContainers.volumePermissions.resources | nindent 4 }} + {{- else if ne .Values.defaultInitContainers.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defaultInitContainers.volumePermissions.resourcesPreset) | nindent 4 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .componentValues.persistence.mountPath }} + {{- if .componentValues.persistence.subPath }} + subPath: {{ .componentValues.persistence.subPath }} + {{- end }} +{{- end -}} + +{{/* +Validate values for victoriametrics. +*/}} +{{- define "victoriametrics.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "victoriametrics.vmselect.kind" .) -}} +{{- $messages := append $messages (include "victoriametrics.vmauth.kind" .) -}} +{{- $messages := append $messages (include "victoriametrics.vmagent.kind" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message -}} +{{- end -}} +{{- end -}} + +{{/* +Function to validate the vmagent kind +*/}} +{{- define "victoriametrics.vmagent.kind" -}} +{{- if .Values.vmagent.enabled -}} +{{- $kind := lower .Values.vmagent.kind -}} +{{- $allowedKinds := list "daemonset" "deployment" -}} +{{- if not (has $kind $allowedKinds) -}} +vmagent: Incorrect kind {{ $kind }}. Allowed values: {{ join "," $allowedKinds }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Function to validate the vmselect kind +*/}} +{{- define "victoriametrics.vmselect.kind" -}} +{{- $kind := lower .Values.vmselect.kind -}} +{{- $allowedKinds := list "statefulset" "deployment" -}} +{{- if not (has $kind $allowedKinds) -}} +vmselect: Incorrect kind {{ $kind }}. Allowed values: {{ join "," $allowedKinds }} +{{- end -}} +{{- end -}} + +{{/* +Function to validate the vmauth kind +*/}} +{{- define "victoriametrics.vmauth.kind" -}} +{{- if .Values.vmauth.enabled -}} +{{- $kind := lower .Values.vmauth.kind -}} +{{- $allowedKinds := list "deployment" "daemonset" -}} +{{- if not (has $kind $allowedKinds) -}} +vmauth: Incorrect kind {{ $kind }}. Allowed values: {{ join "," $allowedKinds }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/bind.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/bind.yaml new file mode 100644 index 00000000..2889e803 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/bind.yaml @@ -0,0 +1,24 @@ +credential: + - name: {{ printf "DOMAIN" }} + value: {{ printf "%s" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} +{{- if (eq .Values.vmauth.service.type "LoadBalancer") }} + - name: EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . 
}} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: PORT + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: ' { .spec.ports[?(@.name=="http")].port }' + - name: USER + value: {{ .Values.vmauth.user.username }} + - name: PASSWORD + value: {{ .Values.vmauth.user.password }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/extra-list.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/extra-list.yaml new file mode 100644 index 00000000..329f5c65 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/configmap.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/configmap.yaml new file mode 100644 index 00000000..17631a7b --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/configmap.yaml @@ -0,0 +1,215 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* Adding the helper in configmap.yaml for better readability */}} +{{- define "victoriametrics.vmagent.defaultScrapeConfig" -}} +global: + scrape_interval: {{ .Values.vmagent.scrapeInterval | default "30s" }} + scrape_timeout: {{ .Values.vmagent.scrapeTimeout | default "30s" }} +scrape_configs: + - job_name: vmagent + static_configs: + - targets: ["localhost:{{ .Values.vmagent.containerPorts.http }}"] + + ## COPY from Prometheus helm chart https://github.com/helm/charts/blob/master/stable/prometheus/values.yaml + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: "kubernetes-service-endpoints" + kubernetes_sd_configs: + - role: endpointslices + namespaces: + own_namespace: true + relabel_configs: + - action: drop + source_labels: [__meta_kubernetes_pod_container_init] + regex: true + - action: keep_if_equal + source_labels: [__meta_kubernetes_service_annotation_prometheus_io_port, __meta_kubernetes_pod_container_port_number] + - source_labels: + [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: + [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: + [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: + [ + __address__, + __meta_kubernetes_service_annotation_prometheus_io_port, + ] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_pod_container_name] + target_label: container + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: service + - source_labels: [__meta_kubernetes_service_name] + target_label: job + replacement: ${1} + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. 
+ # + - job_name: "kubernetes-service-endpoints-slow" + scrape_interval: 5m + scrape_timeout: 30s + kubernetes_sd_configs: + - role: endpointslices + namespaces: + own_namespace: true + relabel_configs: + - action: drop + source_labels: [__meta_kubernetes_pod_container_init] + regex: true + - action: keep_if_equal + source_labels: [__meta_kubernetes_service_annotation_prometheus_io_port, __meta_kubernetes_pod_container_port_number] + - source_labels: + [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: + [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: + [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: + [ + __address__, + __meta_kubernetes_service_annotation_prometheus_io_port, + ] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_pod_container_name] + target_label: container + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: service + - source_labels: [__meta_kubernetes_service_name] + target_label: job + replacement: ${1} + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. 
+ # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + # + - job_name: "kubernetes-pods" + kubernetes_sd_configs: + - role: pod + namespaces: + own_namespace: true + relabel_configs: + - action: drop + source_labels: [__meta_kubernetes_pod_container_init] + regex: true + - action: keep_if_equal + source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_container_port_number] + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: + [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_pod_container_name] + target_label: container + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + ## End of COPY +{{- end }} + + +{{- if and .Values.vmagent.enabled (not .Values.vmagent.existingScrapeConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "victoriametrics.vmagent.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- /* Convert the default configuration and extra overrides */ -}} + {{- $defaultConfiguration := include "victoriametrics.vmagent.defaultScrapeConfig" . | fromYaml -}} + {{- $overrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.vmagent.scrapeConfigOverrides "context" $) | fromYaml -}} + {{- /* add extraJobs */}} + {{- $extraJobs := .Values.vmagent.extraJobs | default list -}} + {{- if $extraJobs -}} + {{- $defaultJobs := $defaultConfiguration.scrape_configs | default list -}} + {{- $mergedJobs := concat $defaultJobs $extraJobs -}} + {{- $defaultConfiguration = set $defaultConfiguration "scrape_configs" $mergedJobs -}} + {{- end -}} + {{- /* Merge both maps and render the configuration */}} + scrape.yml: | + {{- mergeOverwrite $defaultConfiguration $overrideConfiguration | toYaml | nindent 4 }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/dep-ds.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/dep-ds.yaml new file mode 100644 index 00000000..cb5de41e --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/dep-ds.yaml @@ -0,0 +1,215 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmagent.enabled }} +{{- $kind := lower .Values.vmagent.kind }} +{{- if eq $kind "daemonset" }} +apiVersion: {{ include "common.capabilities.daemonset.apiVersion" . }} +kind: DaemonSet +{{- else }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . 
}}
+kind: Deployment
+{{- end }}
+metadata:
+  name: {{ template "victoriametrics.vmagent.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/part-of: victoriametrics
+    app.kubernetes.io/component: vmagent
+  {{- if or .Values.vmagent.annotations .Values.commonAnnotations }}
+  {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmagent.annotations .Values.commonAnnotations) "context" .) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and (not .Values.vmagent.autoscaling.hpa.enabled) (eq $kind "deployment") }}
+  replicas: {{ .Values.vmagent.replicaCount }}
+  {{- end }}
+  {{- if .Values.vmagent.updateStrategy }}
+  {{ ternary "strategy" "updateStrategy" (eq $kind "deployment")}}: {{- toYaml .Values.vmagent.updateStrategy | nindent 4 }}
+  {{- end }}
+  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" (list .Values.vmagent.podLabels .Values.commonLabels) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmagent + template: + metadata: + {{- if or .Values.vmagent.podAnnotations (not .Values.vmagent.existingScrapeConfigMap) }} + annotations: + {{- if .Values.vmagent.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.vmagent.existingScrapeConfigMap }} + checksum/scrape-config: {{ include (print $.Template.BasePath "/vmagent/configmap.yaml") $ | sha256sum }} + {{- end }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + spec: + serviceAccountName: {{ template "victoriametrics.vmagent.serviceAccountName" . }} + {{- include "victoriametrics.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.vmagent.automountServiceAccountToken }} + {{- if .Values.vmagent.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmagent.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmagent.podAffinityPreset "component" "vmagent" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmagent.podAntiAffinityPreset "component" "vmagent" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.vmagent.nodeAffinityPreset.type "key" .Values.vmagent.nodeAffinityPreset.key "values" 
.Values.vmagent.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.vmagent.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmagent.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmagent.priorityClassName }} + priorityClassName: {{ .Values.vmagent.priorityClassName | quote }} + {{- end }} + {{- if .Values.vmagent.schedulerName }} + schedulerName: {{ .Values.vmagent.schedulerName | quote }} + {{- end }} + {{- if .Values.vmagent.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmagent.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmagent.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmagent.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.vmagent.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.vmagent.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: vmagent + image: {{ template "victoriametrics.vmagent.image" . 
}} + imagePullPolicy: {{ .Values.vmagent.image.pullPolicy }} + {{- if .Values.vmagent.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmagent.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmagent.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmagent.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.args "context" $) | nindent 12 }} + {{- else }} + args: + {{- if .Values.envflagEnable }} + - vmagent + - --envflag.enable + - --envflag.prefix={{ .Values.envflagPrefix }} + {{- end }} + - --loggerFormat={{ .Values.loggerFormat }} + - --httpListenAddr=:{{ .Values.vmagent.containerPorts.http }} + - --promscrape.config=/opt/drycc/scrape/scrape.yml + - --remoteWrite.tmpDataPath=/opt/drycc/data/tmpData + - --remoteWrite.maxDiskUsagePerURL=2GiB + - --promscrape.cluster.membersCount={{.Values.vmagent.replicaCount}} + - --promscrape.cluster.memberNum=$(SHARD_NUM) + {{- $remoteHost := (include "victoriametrics.vminsert.fullname" .) 
}} + {{- $remotePort := .Values.vminsert.service.ports.http }} + - --remoteWrite.url=http://{{ $remoteHost }}:{{ $remotePort }}/insert/0/prometheus + {{- if .Values.vmagent.enableListeners.influxdb }} + - --influxListenAddr=:{{ .Values.vmagent.containerPorts.influxdb }} + {{- end }} + {{- if .Values.vmagent.enableListeners.graphite }} + - --graphiteListenAddr=:{{ .Values.vmagent.containerPorts.graphite }} + {{- end }} + {{- if .Values.vmagent.enableListeners.opentsdb }} + - --opentsdbListenAddr=:{{ .Values.vmagent.containerPorts.opentsdb }} + {{- end }} + {{- if .Values.vmagent.extraArgs }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.extraArgs "context" $) | nindent 12 }} + {{- end }} + {{- end }} + env: + - name: SHARD_NUM + valueFrom: + fieldRef: + fieldPath: metadata.labels['apps.kubernetes.io/pod-index'] + {{- if .Values.vmagent.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.vmagent.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vmagent.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.vmagent.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vmagent.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.vmagent.resources }} + resources: {{- toYaml .Values.vmagent.resources | nindent 12 }} + {{- else if ne .Values.vmagent.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.vmagent.resourcesPreset) | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.vmagent.containerPorts.http }} + {{- if .Values.vmagent.enableListeners.influxdb }} + - name: tcp-influxdb + containerPort: {{ .Values.vmagent.containerPorts.influxdb }} + {{- end }} + {{- if .Values.vmagent.enableListeners.graphite }} + - name: tcp-graphite + 
containerPort: {{ .Values.vmagent.containerPorts.graphite }} + {{- end }} + {{- if .Values.vmagent.enableListeners.opentsdb }} + - name: tcp-opentsdb + containerPort: {{ .Values.vmagent.containerPorts.opentsdb }} + {{- end }} + {{- if .Values.vmagent.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmagent.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmagent.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http + {{- end }} + {{- if .Values.vmagent.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmagent.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmagent.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmagent.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.vmagent.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmagent.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmagent.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: scrape-configuration + mountPath: /opt/drycc/scrape/ + - name: empty-dir + mountPath: /opt/drycc/data/tmpData + subPath: app-tmpdata-dir + {{- if .Values.vmagent.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.extraVolumeMounts 
"context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmagent.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: scrape-configuration + configMap: + name: {{ include "victoriametrics.vmagent.scrapeConfigMapName" . }} + - name: empty-dir + emptyDir: {} + {{- if .Values.vmagent.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/hpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/hpa.yaml new file mode 100644 index 00000000..41267c88 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/hpa.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmagent.enabled (eq .Values.vmagent.kind "deployment") .Values.vmagent.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" (dict "context" $) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmagent.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "victoriametrics.vmagent.fullname" . 
}} + minReplicas: {{ .Values.vmagent.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.vmagent.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.vmagent.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.vmagent.autoscaling.hpa.targetMemory }} + {{- end }} + {{- if .Values.vmagent.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vmagent.autoscaling.hpa.targetCPU }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/networkpolicy.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/networkpolicy.yaml new file mode 100644 index 00000000..757ccd98 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/networkpolicy.yaml @@ -0,0 +1,99 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmagent.enabled .Values.vmagent.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "victoriametrics.vmagent.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + - Egress + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmagent.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if .Values.vmagent.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + {{- if .Values.vmauth.enabled }} + # Allow outbound connections to victoriametrics insert controller + - ports: + - port: {{ .Values.vmauth.containerPorts.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- else }} + # Allow outbound connections to victoriametrics insert controller + - ports: + - port: {{ .Values.vminsert.containerPorts.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- end }} + {{- if .Values.vmagent.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.vmagent.containerPorts.http }} + {{- if .Values.vmagent.enableListeners.graphite }} + - port: {{ .Values.vmagent.containerPorts.graphite }} + {{- end }} + {{- if .Values.vmagent.enableListeners.influxdb }} + - port: {{ .Values.vmagent.containerPorts.influxdb }} + {{- end }} + {{- if .Values.vmagent.enableListeners.opentsdb }} + - port: {{ .Values.vmagent.containerPorts.opentsdb }} + {{- end }} + {{- if not .Values.vmagent.networkPolicy.allowExternal }} + from: + - namespaceSelector: + matchLabels: + 
kubernetes.io/metadata.name: {{ .Release.Namespace }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.vmagent.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.vmagent.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.vmagent.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.vmagent.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.vmagent.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/pdb.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/pdb.yaml new file mode 100644 index 00000000..3046ff8d --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmagent.enabled (eq .Values.vmagent.kind "deployment") .Values.vmagent.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "victoriametrics.vmagent.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vmagent.pdb.minAvailable }} + minAvailable: {{ .Values.vmagent.pdb.minAvailable }} + {{- end }} + {{- if or .Values.vmagent.pdb.maxUnavailable ( not .Values.vmagent.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.vmagent.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmagent.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmagent +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/rbac.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/rbac.yaml new file mode 100644 index 00000000..2327e178 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/rbac.yaml @@ -0,0 +1,72 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmagent.enabled .Values.vmagent.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: {{ ternary "Role" "ClusterRole" .Values.vmagent.namespaced }} +metadata: + name: {{ include "victoriametrics.vmagent.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+    app.kubernetes.io/part-of: victoriametrics
+    app.kubernetes.io/component: vmagent
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+rules:
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources:
+      - services
+      - endpoints
+      - pods
+    verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs: ["get", "list", "watch"]
+  {{- if not .Values.vmagent.namespaced }}
+  - apiGroups: [""]
+    resources:
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list", "watch"]
+    # NOTE(review): removed stray `resourceNames: ["zhangjint"]` — leftover debug value; resourceNames cannot restrict list/watch verbs anyway
+  - nonResourceURLs: {{ include "common.tplvalues.render" ( dict "value" .Values.vmagent.allowedMetricsEndpoints "context" $ ) | nindent 6 }}
+    verbs: ["get"]
+  {{- end }}
+  {{- if .Values.vmagent.rbac.rules }}
+  {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.rbac.rules "context" $ ) | nindent 2 }}
+  {{- end }}
+---
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+kind: {{ ternary "RoleBinding" "ClusterRoleBinding" .Values.vmagent.namespaced }}
+metadata:
+  name: {{ include "victoriametrics.vmagent.fullname" . }}
+  namespace: {{ include "common.names.namespace" .
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "victoriametrics.vmagent.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: {{ ternary "Role" "ClusterRole" .Values.vmagent.namespaced }} + name: {{ include "victoriametrics.vmagent.fullname" . }} +{{- end -}} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service-account.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service-account.yaml new file mode 100644 index 00000000..6ae16900 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service-account.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmagent.enabled .Values.vmagent.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "victoriametrics.vmagent.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if or .Values.vmagent.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmagent.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.vmagent.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service.yaml new file mode 100644 index 00000000..953ef79c --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/service.yaml @@ -0,0 +1,92 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmagent.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "victoriametrics.vmagent.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if or .Values.vmagent.service.annotations .Values.commonAnnotations .Values.vmagent.metrics.enabled .Values.vmagent.metrics.annotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmagent.service.annotations .Values.commonAnnotations .Values.vmagent.metrics.annotations) "context" .) }} + {{- if .Values.vmagent.metrics.enabled }} + {{- $defaultMetricsAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" .Values.vmagent.service.ports.http "prometheus.io/path" "/metrics" }} + {{- $annotations = include "common.tplvalues.merge" (dict "values" (list $annotations $defaultMetricsAnnotations) "context" .) 
}} + {{- end }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.vmagent.service.type }} + {{- if and .Values.vmagent.service.clusterIP (eq .Values.vmagent.service.type "ClusterIP") }} + clusterIP: {{ .Values.vmagent.service.clusterIP }} + {{- end }} + {{- if .Values.vmagent.service.sessionAffinity }} + sessionAffinity: {{ .Values.vmagent.service.sessionAffinity }} + {{- end }} + {{- if .Values.vmagent.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.vmagent.service.type "LoadBalancer") (eq .Values.vmagent.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.vmagent.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.vmagent.service.type "LoadBalancer") (not (empty .Values.vmagent.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.vmagent.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.vmagent.service.type "LoadBalancer") (not (empty .Values.vmagent.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.vmagent.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.vmagent.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.vmagent.service.type "NodePort") (eq .Values.vmagent.service.type "LoadBalancer")) (not (empty .Values.vmagent.service.nodePorts.http)) }} + nodePort: {{ .Values.vmagent.service.nodePorts.http }} + {{- else if eq .Values.vmagent.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: http + {{- if .Values.vmagent.enableListeners.graphite }} + - name: tcp-graphite + port: {{ .Values.vmagent.service.ports.graphite }} + protocol: TCP + {{- if and (or (eq .Values.vmagent.service.type "NodePort") (eq .Values.vmagent.service.type 
"LoadBalancer")) (not (empty .Values.vmagent.service.nodePorts.graphite)) }} + nodePort: {{ .Values.vmagent.service.nodePorts.graphite }} + {{- else if eq .Values.vmagent.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: tcp-graphite + {{- end }} + {{- if .Values.vmagent.enableListeners.opentsdb }} + - name: tcp-opentsdb + port: {{ .Values.vmagent.service.ports.opentsdb }} + protocol: TCP + {{- if and (or (eq .Values.vmagent.service.type "NodePort") (eq .Values.vmagent.service.type "LoadBalancer")) (not (empty .Values.vmagent.service.nodePorts.opentsdb)) }} + nodePort: {{ .Values.vmagent.service.nodePorts.opentsdb }} + {{- else if eq .Values.vmagent.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: tcp-opentsdb + {{- end }} + {{- if .Values.vmagent.enableListeners.influxdb }} + - name: tcp-influxdb + port: {{ .Values.vmagent.service.ports.influxdb }} + protocol: TCP + {{- if and (or (eq .Values.vmagent.service.type "NodePort") (eq .Values.vmagent.service.type "LoadBalancer")) (not (empty .Values.vmagent.service.nodePorts.influxdb)) }} + nodePort: {{ .Values.vmagent.service.nodePorts.influxdb }} + {{- else if eq .Values.vmagent.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: tcp-influxdb + {{- end }} + {{- if .Values.vmagent.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmagent.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vmagent +{{- end }} \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/servicemonitor.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/servicemonitor.yaml new file mode 100644 index 00000000..b69bcbc2 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmagent.enabled .Values.vmagent.metrics.enabled .Values.vmagent.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "victoriametrics.vmagent.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.vmagent.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if or .Values.vmagent.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmagent.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
+  {{- end }}
+spec:
+  jobLabel: {{ .Values.vmagent.metrics.serviceMonitor.jobLabel | quote }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
+      app.kubernetes.io/component: vmagent
+      {{- if .Values.vmagent.metrics.serviceMonitor.selector }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.vmagent.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+      {{- end }}
+  endpoints:
+    - port: http
+      path: "/metrics"
+      {{- if .Values.vmagent.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.vmagent.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.vmagent.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.vmagent.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+      {{- if .Values.vmagent.metrics.serviceMonitor.honorLabels }}
+      honorLabels: {{ .Values.vmagent.metrics.serviceMonitor.honorLabels }}
+      {{- end }}
+      {{- if .Values.vmagent.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.vmagent.metrics.serviceMonitor.relabelings }}
+      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmagent.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ include "common.names.namespace" .
| quote }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/vpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/vpa.yaml new file mode 100644 index 00000000..23ec617f --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmagent/vpa.yaml @@ -0,0 +1,50 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmagent.enabled (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.vmagent.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" (dict "context" $) }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmagent.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmagent + {{- if or .Values.vmagent.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmagent.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: vmagent + {{- with .Values.vmagent.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmagent.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmagent.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . 
| nindent 8 }} + {{- end }} + targetRef: + {{- if eq .Values.vmagent.kind "daemonset" }} + apiVersion: {{ include "common.capabilities.daemonset.apiVersion" . }} + kind: DaemonSet + {{- else }} + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + {{- end }} + name: {{ include "victoriametrics.vmagent.fullname" . }} + {{- if .Values.vmagent.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.vmagent.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/configmap.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/configmap.yaml new file mode 100644 index 00000000..b223e86f --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/configmap.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* Adding the helper in configmap.yaml for better readability */}} +{{- define "victoriametrics.vmalert.defaultRulesConfig" -}} +groups: [] +{{- end }} + +{{- if and .Values.vmalert.enabled (not .Values.vmalert.existingRulesConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "victoriametrics.vmalert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- /* Convert the default configuration and extra overrides */ -}} + {{- $defaultConfiguration := include "victoriametrics.vmalert.defaultRulesConfig" . 
| fromYaml -}} + {{- $overrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.vmalert.rulesConfigOverrides "context" $) | fromYaml -}} + + {{- /* Merge both maps and render the configuration */}} + alert-rules.yaml: | + {{- mergeOverwrite $defaultConfiguration $overrideConfiguration | toYaml | nindent 4 }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/deployment.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/deployment.yaml new file mode 100644 index 00000000..a01b0398 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/deployment.yaml @@ -0,0 +1,185 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmalert.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "victoriametrics.vmalert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if or .Values.vmalert.deploymentAnnotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmalert.deploymentAnnotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.vmalert.autoscaling.hpa.enabled }} + replicas: {{ .Values.vmalert.replicaCount }} + {{- end }} + {{- if .Values.vmalert.updateStrategy }} + strategy: {{- toYaml .Values.vmalert.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" (dict "values" (list .Values.vmalert.podLabels .Values.commonLabels) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmalert + template: + metadata: + {{- if or .Values.vmalert.podAnnotations (not .Values.vmalert.existingRulesConfigMap) }} + annotations: + {{- if .Values.vmalert.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.vmalert.existingRulesConfigMap }} + checksum/rules-config: {{ include (print $.Template.BasePath "/vmalert/configmap.yaml") $ | sha256sum }} + {{- end }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + spec: + serviceAccountName: {{ template "victoriametrics.vmalert.serviceAccountName" . }} + {{- include "victoriametrics.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.vmalert.automountServiceAccountToken }} + {{- if .Values.vmalert.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmalert.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.vmalert.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmalert.podAffinityPreset "component" "vmalert" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmalert.podAntiAffinityPreset "component" "vmalert" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.vmalert.nodeAffinityPreset.type "key" .Values.vmalert.nodeAffinityPreset.key "values" 
.Values.vmalert.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.vmalert.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.vmalert.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmalert.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmalert.priorityClassName }} + priorityClassName: {{ .Values.vmalert.priorityClassName | quote }} + {{- end }} + {{- if .Values.vmalert.schedulerName }} + schedulerName: {{ .Values.vmalert.schedulerName | quote }} + {{- end }} + {{- if .Values.vmalert.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmalert.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmalert.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmalert.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.vmalert.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.vmalert.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: vmalert + image: {{ template "victoriametrics.vmalert.image" . 
}} + imagePullPolicy: {{ .Values.vmalert.image.pullPolicy }} + {{- if .Values.vmalert.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmalert.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmalert.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmalert.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.args "context" $) | nindent 12 }} + {{- else }} + args: + {{- if .Values.envflagEnable }} + - --envflag.enable + - --envflag.prefix={{ .Values.envflagPrefix }} + {{- end }} + - --loggerFormat={{ .Values.loggerFormat }} + - --httpListenAddr=:{{ .Values.vmalert.containerPorts.http }} + - --rule=/config/alert-rules.yaml + {{- $remoteWriteHost := ternary (include "victoriametrics.vmauth.fullname" .) (include "victoriametrics.vminsert.fullname" .) .Values.vmauth.enabled }} + {{- $remoteWritePort := ternary .Values.vmauth.service.ports.http .Values.vminsert.service.ports.http .Values.vmauth.enabled }} + - --remoteWrite.url=http://{{ $remoteWriteHost }}:{{ $remoteWritePort }}/insert/0/prometheus + {{- $remoteReadHost := ternary (include "victoriametrics.vmauth.fullname" .) (include "victoriametrics.vmselect.fullname" .) 
.Values.vmauth.enabled }}
+          {{- $remoteReadPort := ternary .Values.vmauth.service.ports.http .Values.vmselect.service.ports.http .Values.vmauth.enabled }}
+          - --remoteRead.url=http://{{ $remoteReadHost }}:{{ $remoteReadPort }}/select/0/prometheus
+          {{- if .Values.vmalert.datasourceUrl }}
+          - --datasource.url={{ .Values.vmalert.datasourceUrl }}
+          {{- else }}
+          - --datasource.url=http://{{ $remoteReadHost }}:{{ $remoteReadPort }}/select/0/prometheus
+          {{- end }}
+          {{- if .Values.vmalert.notifierUrl }}
+          - --notifier.url={{ .Values.vmalert.notifierUrl }}
+          {{- end }}
+          {{- if .Values.vmalert.extraArgs }}
+          {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.extraArgs "context" $) | nindent 12 }}
+          {{- end }}
+          {{- end }}
+          env:
+            {{- if .Values.vmalert.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          envFrom:
+            {{- if .Values.vmalert.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.vmalert.extraEnvVarsCM "context" $) }}
+            {{- end }}
+            {{- if .Values.vmalert.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.vmalert.extraEnvVarsSecret "context" $) }}
+            {{- end }}
+          {{- if .Values.vmalert.resources }}
+          resources: {{- toYaml .Values.vmalert.resources | nindent 12 }}
+          {{- else if ne .Values.vmalert.resourcesPreset "none" }}
+          resources: {{- include "common.resources.preset" (dict "type" .Values.vmalert.resourcesPreset) | nindent 12 }}
+          {{- end }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.vmalert.containerPorts.http }}
+          {{- if .Values.vmalert.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.customLivenessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.vmalert.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit
.Values.vmalert.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http + {{- end }} + {{- if .Values.vmalert.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmalert.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmalert.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmalert.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.vmalert.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmalert.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmalert.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: rules-configuration + mountPath: /config + {{- if .Values.vmalert.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmalert.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmalert.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: rules-configuration + configMap: + name: {{ include "victoriametrics.vmalert.rulesConfigMapName" . 
}} + {{- if .Values.vmalert.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/hpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/hpa.yaml new file mode 100644 index 00000000..d65175b8 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/hpa.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmalert.enabled .Values.vmalert.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" (dict "context" $) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmalert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "victoriametrics.vmalert.fullname" . 
}} + minReplicas: {{ .Values.vmalert.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.vmalert.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.vmalert.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.vmalert.autoscaling.hpa.targetMemory }} + {{- end }} + {{- if .Values.vmalert.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vmalert.autoscaling.hpa.targetCPU }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/networkpolicy.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/networkpolicy.yaml new file mode 100644 index 00000000..7653a9ef --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/networkpolicy.yaml @@ -0,0 +1,76 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmalert.enabled .Values.vmalert.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "victoriametrics.vmalert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + - Egress + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmalert.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if .Values.vmalert.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to victoriametrics storage controller + - ports: + - port: {{ .Values.vmstorage.containerPorts.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if .Values.vmalert.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmalert.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.vmalert.containerPorts.http }} + {{- if not .Values.vmalert.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.vmalert.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.vmalert.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.vmalert.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.vmalert.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.vmalert.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmalert.networkPolicy.extraIngress "context" $ ) | 
nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/pdb.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/pdb.yaml new file mode 100644 index 00000000..718863da --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmalert.enabled .Values.vmalert.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "victoriametrics.vmalert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vmalert.pdb.minAvailable }} + minAvailable: {{ .Values.vmalert.pdb.minAvailable }} + {{- end }} + {{- if or .Values.vmalert.pdb.maxUnavailable ( not .Values.vmalert.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.vmalert.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmalert.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmalert +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service-account.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service-account.yaml new file mode 100644 index 00000000..939a7915 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service-account.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmalert.enabled .Values.vmalert.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "victoriametrics.vmalert.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if or .Values.vmalert.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmalert.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.vmalert.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service.yaml new file mode 100644 index 00000000..2e3b7864 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/service.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmalert.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "victoriametrics.vmalert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if or .Values.vmalert.service.annotations .Values.commonAnnotations .Values.vmalert.metrics.enabled .Values.vmalert.metrics.annotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmalert.service.annotations .Values.commonAnnotations .Values.vmalert.metrics.annotations) "context" .) }} + {{- if .Values.vmalert.metrics.enabled }} + {{- $defaultMetricsAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" .Values.vmalert.service.ports.http "prometheus.io/path" "/metrics" }} + {{- $annotations = include "common.tplvalues.merge" (dict "values" (list $annotations $defaultMetricsAnnotations) "context" .) 
}} + {{- end }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.vmalert.service.type }} + {{- if and .Values.vmalert.service.clusterIP (eq .Values.vmalert.service.type "ClusterIP") }} + clusterIP: {{ .Values.vmalert.service.clusterIP }} + {{- end }} + {{- if .Values.vmalert.service.sessionAffinity }} + sessionAffinity: {{ .Values.vmalert.service.sessionAffinity }} + {{- end }} + {{- if .Values.vmalert.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.vmalert.service.type "LoadBalancer") (eq .Values.vmalert.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.vmalert.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.vmalert.service.type "LoadBalancer") (not (empty .Values.vmalert.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.vmalert.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.vmalert.service.type "LoadBalancer") (not (empty .Values.vmalert.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.vmalert.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.vmalert.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.vmalert.service.type "NodePort") (eq .Values.vmalert.service.type "LoadBalancer")) (not (empty .Values.vmalert.service.nodePorts.http)) }} + nodePort: {{ .Values.vmalert.service.nodePorts.http }} + {{- else if eq .Values.vmalert.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: http + {{- if .Values.vmalert.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( 
list .Values.vmalert.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vmalert +{{- end }} \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/servicemonitor.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/servicemonitor.yaml new file mode 100644 index 00000000..d040f7ed --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmalert.enabled .Values.vmalert.metrics.enabled .Values.vmalert.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "victoriametrics.vmalert.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.vmalert.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if or .Values.vmalert.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmalert.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.vmalert.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmalert + {{- if .Values.vmalert.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmalert.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.vmalert.metrics.serviceMonitor.interval }} + interval: {{ .Values.vmalert.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.vmalert.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.vmalert.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.vmalert.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.vmalert.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.vmalert.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmalert.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmalert.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmalert.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/vpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/vpa.yaml new file mode 100644 index 00000000..64bae63c --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmalert/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmalert.enabled (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.vmalert.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" (dict "context" $) }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmalert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmalert + {{- if or .Values.vmalert.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmalert.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: vmalert + {{- with .Values.vmalert.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmalert.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmalert.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . 
}} + kind: Deployment + name: {{ include "victoriametrics.vmalert.fullname" . }} + {{- if .Values.vmalert.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.vmalert.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/dep-ds.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/dep-ds.yaml new file mode 100644 index 00000000..bd72864e --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/dep-ds.yaml @@ -0,0 +1,178 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmauth.enabled }} +{{- $kind := lower .Values.vmauth.kind }} +{{- if eq $kind "daemonset" }} +apiVersion: {{ include "common.capabilities.daemonset.apiVersion" . }} +kind: DaemonSet +{{- else }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if or .Values.vmauth.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmauth.annotations .Values.commonAnnotations) "context" .) 
}} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.vmauth.autoscaling.hpa.enabled) (eq $kind "deployment") }} + replicas: {{ .Values.vmauth.replicaCount }} + {{- end }} + {{- if .Values.vmauth.updateStrategy }} + {{ ternary "strategy" "updateStrategy" (eq $kind "deployment")}}: {{- toYaml .Values.vmauth.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" (dict "values" (list .Values.vmauth.podLabels .Values.commonLabels) "context" .) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmauth + template: + metadata: + {{- if or .Values.vmauth.podAnnotations (not .Values.vmauth.existingSecret) }} + annotations: + {{- if .Values.vmauth.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.vmauth.existingSecret }} + checksum/secret: {{ include (print $.Template.BasePath "/vmauth/secret.yaml") $ | sha256sum }} + {{- end }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + spec: + serviceAccountName: {{ template "victoriametrics.vmauth.serviceAccountName" . }} + {{- include "victoriametrics.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.vmauth.automountServiceAccountToken }} + {{- if .Values.vmauth.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmauth.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.vmauth.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmauth.podAffinityPreset "component" "vmauth" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmauth.podAntiAffinityPreset "component" "vmauth" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.vmauth.nodeAffinityPreset.type "key" .Values.vmauth.nodeAffinityPreset.key "values" .Values.vmauth.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.vmauth.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.vmauth.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmauth.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmauth.priorityClassName }} + priorityClassName: {{ .Values.vmauth.priorityClassName | quote }} + {{- end }} + {{- if .Values.vmauth.schedulerName }} + schedulerName: {{ .Values.vmauth.schedulerName | quote }} + {{- end }} + {{- if .Values.vmauth.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.vmauth.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmauth.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmauth.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.vmauth.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.vmauth.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: vmauth + image: {{ template "victoriametrics.vmauth.image" . }} + imagePullPolicy: {{ .Values.vmauth.image.pullPolicy }} + {{- if .Values.vmauth.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmauth.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmauth.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmauth.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.args "context" $) | nindent 12 }} + {{- else }} + args: + {{- if .Values.envflagEnable }} + - vmauth + - --envflag.enable + - --envflag.prefix={{ .Values.envflagPrefix }} + {{- end }} + - --loggerFormat={{ .Values.loggerFormat }} + - --httpListenAddr=:{{ .Values.vmauth.containerPorts.http }} + - --auth.config=/config/auth.yml + {{- if .Values.vmauth.extraArgs }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.extraArgs "context" $) | nindent 12 }} + {{- end }} + {{- end }} + env: + {{- if .Values.vmauth.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.vmauth.extraEnvVarsCM }} + - configMapRef: + name: {{ 
include "common.tplvalues.render" (dict "value" .Values.vmauth.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.vmauth.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vmauth.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.vmauth.resources }} + resources: {{- toYaml .Values.vmauth.resources | nindent 12 }} + {{- else if ne .Values.vmauth.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.vmauth.resourcesPreset) | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.vmauth.containerPorts.http }} + {{- if .Values.vmauth.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmauth.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmauth.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http + {{- end }} + {{- if .Values.vmauth.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmauth.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmauth.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmauth.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.vmauth.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmauth.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmauth.lifecycleHooks }} + lifecycle: {{- 
include "common.tplvalues.render" (dict "value" .Values.vmauth.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: configuration + mountPath: /config + {{- if .Values.vmauth.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmauth.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmauth.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: configuration + secret: + secretName: {{ include "victoriametrics.vmauth.secretName" . }} + {{- if .Values.vmauth.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/hpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/hpa.yaml new file mode 100644 index 00000000..552326cf --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/hpa.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmauth.enabled (eq .Values.vmauth.kind "deployment") .Values.vmauth.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" (dict "context" $) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "victoriametrics.vmauth.fullname" . }} + minReplicas: {{ .Values.vmauth.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.vmauth.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.vmauth.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.vmauth.autoscaling.hpa.targetMemory }} + {{- end }} + {{- if .Values.vmauth.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vmauth.autoscaling.hpa.targetCPU }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress-tls-secret.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress-tls-secret.yaml new file mode 100644 index 00000000..fd5705c2 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress-tls-secret.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmauth.ingress.enabled }} +{{- if .Values.vmauth.ingress.secrets }} +{{- range .Values.vmauth.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" $.Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.vmauth.ingress.tls .Values.vmauth.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.vmauth.ingress.hostname }} +{{- $ca := genCA "vmauth-ca" 365 }} +{{- $cert := genSignedCert .Values.vmauth.ingress.hostname nil (list .Values.vmauth.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress.yaml new file mode 100644 index 00000000..01c01f0f --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/ingress.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmauth.enabled .Values.vmauth.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ template "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if or .Values.vmauth.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmauth.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vmauth.ingress.ingressClassName }} + ingressClassName: {{ .Values.vmauth.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.vmauth.ingress.hostname }} + - host: {{ .Values.vmauth.ingress.hostname }} + http: + paths: + {{- if .Values.vmauth.ingress.extraPaths }} + {{- toYaml .Values.vmauth.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.vmauth.ingress.path }} + pathType: {{ .Values.vmauth.ingress.pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "victoriametrics.vmauth.fullname" . | trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.vmauth.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + pathType: {{ default "ImplementationSpecific" .pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "victoriametrics.vmauth.fullname" $ | trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.vmauth.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.vmauth.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.vmauth.ingress.annotations )) .Values.vmauth.ingress.selfSigned)) .Values.vmauth.ingress.extraTls }} + tls: + {{- if and .Values.vmauth.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.vmauth.ingress.annotations )) .Values.vmauth.ingress.selfSigned) }} + - hosts: + - {{ .Values.vmauth.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.vmauth.ingress.hostname }} + {{- end }} + {{- if .Values.vmauth.ingress.extraTls }} + {{- include 
"common.tplvalues.render" (dict "value" .Values.vmauth.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/networkpolicy.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/networkpolicy.yaml new file mode 100644 index 00000000..6b016dd7 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/networkpolicy.yaml @@ -0,0 +1,100 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmauth.enabled .Values.vmauth.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + - Egress + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmauth.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if .Values.vmauth.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to victoriametrics insert controller + - ports: + - port: {{ .Values.vminsert.containerPorts.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + # Allow outbound connections to victoriametrics select controller + - ports: + - port: {{ .Values.vmselect.containerPorts.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if .Values.vmauth.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmauth.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + {{- if eq .Values.vmauth.service.type "ClusterIP" }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.vmauth.containerPorts.http }} + {{- if not .Values.vmauth.networkPolicy.allowExternal }} + from: + {{- range $namespace := .Values.vmauth.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels 
"context" $ ) | nindent 14 }} + {{- if .Values.vmauth.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.vmauth.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.vmauth.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.vmauth.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.vmauth.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmauth.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + {{- if eq .Values.vmauth.service.type "LoadBalancer" }} + ingress: + - {} + {{- end}} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/pdb.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/pdb.yaml new file mode 100644 index 00000000..82882e27 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmauth.enabled (eq .Values.vmauth.kind "deployment") .Values.vmauth.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vmauth.pdb.minAvailable }} + minAvailable: {{ .Values.vmauth.pdb.minAvailable }} + {{- end }} + {{- if or .Values.vmauth.pdb.maxUnavailable ( not .Values.vmauth.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.vmauth.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmauth.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmauth +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/secret.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/secret.yaml new file mode 100644 index 00000000..a0da2f30 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/secret.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* Adding the helper in configmap.yaml for better readability */}} +{{- define "victoriametrics.vmauth.defaultConfig" -}} +users: +- username: {{ .Values.vmauth.user.username | quote }} + password: {{ .Values.vmauth.user.password | quote }} + url_map: + - discover_backend_ips: true + src_paths: + - /select/.* + url_prefix: http://{{ include "victoriametrics.vmselect.fullname" . }}:{{ .Values.vmselect.service.ports.http }} + - discover_backend_ips: true + src_paths: + - /insert/.* + url_prefix: http://{{ include "victoriametrics.vminsert.fullname" . 
}}:{{ .Values.vminsert.service.ports.http }} +{{- end }} + +{{- if and .Values.vmauth.enabled (not .Values.vmauth.existingSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +stringData: + {{- /* Convert the default configuration and extra overrides */ -}} + {{- $defaultConfiguration := include "victoriametrics.vmauth.defaultConfig" . | fromYaml -}} + {{- $overrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.vmauth.configOverrides "context" $) | fromYaml -}} + + {{- /* Merge both maps and render the configuration */}} + auth.yml: | + {{- mergeOverwrite $defaultConfiguration $overrideConfiguration | toYaml | nindent 4 }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service-account.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service-account.yaml new file mode 100644 index 00000000..451acd98 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service-account.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmauth.enabled .Values.vmauth.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "victoriametrics.vmauth.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if or .Values.vmauth.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmauth.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.vmauth.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service.yaml new file mode 100644 index 00000000..b10555b5 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/service.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmauth.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if or .Values.vmauth.service.annotations .Values.commonAnnotations .Values.vmauth.metrics.enabled .Values.vmauth.metrics.annotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmauth.service.annotations .Values.commonAnnotations .Values.vmauth.metrics.annotations) "context" .) 
}} + {{- if .Values.vmauth.metrics.enabled }} + {{- $defaultMetricsAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" .Values.vmauth.service.ports.http "prometheus.io/path" "/metrics" }} + {{- $annotations = include "common.tplvalues.merge" (dict "values" (list $annotations $defaultMetricsAnnotations) "context" .) }} + {{- end }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.vmauth.service.type }} + {{- if and .Values.vmauth.service.clusterIP (eq .Values.vmauth.service.type "ClusterIP") }} + clusterIP: {{ .Values.vmauth.service.clusterIP }} + {{- end }} + {{- if .Values.vmauth.service.sessionAffinity }} + sessionAffinity: {{ .Values.vmauth.service.sessionAffinity }} + {{- end }} + {{- if .Values.vmauth.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.vmauth.service.type "LoadBalancer") (eq .Values.vmauth.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.vmauth.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.vmauth.service.type "LoadBalancer") (not (empty .Values.vmauth.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.vmauth.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.vmauth.service.type "LoadBalancer") (not (empty .Values.vmauth.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.vmauth.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.vmauth.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.vmauth.service.type "NodePort") (eq .Values.vmauth.service.type "LoadBalancer")) (not (empty .Values.vmauth.service.nodePorts.http)) }} + nodePort: {{ .Values.vmauth.service.nodePorts.http }} + {{- else if eq 
.Values.vmauth.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: http + {{- if .Values.vmauth.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmauth.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vmauth +{{- end }} \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/servicemonitor.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/servicemonitor.yaml new file mode 100644 index 00000000..e6bdebbc --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmauth.enabled .Values.vmauth.metrics.enabled .Values.vmauth.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "victoriametrics.vmauth.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.vmauth.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if or .Values.vmauth.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmauth.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.vmauth.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmauth + {{- if .Values.vmauth.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmauth.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.vmauth.metrics.serviceMonitor.interval }} + interval: {{ .Values.vmauth.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.vmauth.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.vmauth.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.vmauth.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.vmauth.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.vmauth.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmauth.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmauth.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmauth.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/vpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/vpa.yaml new file mode 100644 index 00000000..558adcac --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmauth/vpa.yaml @@ -0,0 +1,50 @@ +{{- /* +Copyright Broadcom, Inc. 
All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmauth.enabled (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.vmauth.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" (dict "context" $) }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmauth.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmauth + {{- if or .Values.vmauth.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmauth.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: vmauth + {{- with .Values.vmauth.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmauth.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmauth.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + {{- if eq .Values.vmauth.kind "daemonset" }} + apiVersion: {{ include "common.capabilities.daemonset.apiVersion" . }} + kind: DaemonSet + {{- else }} + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + {{- end }} + name: {{ include "victoriametrics.vmauth.fullname" . 
}} + {{- if .Values.vmauth.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.vmauth.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/deployment.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/deployment.yaml new file mode 100644 index 00000000..23a0697b --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/deployment.yaml @@ -0,0 +1,160 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "victoriametrics.vminsert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if or .Values.vminsert.deploymentAnnotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vminsert.deploymentAnnotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.vminsert.autoscaling.hpa.enabled }} + replicas: {{ .Values.vminsert.replicaCount }} + {{- end }} + {{- if .Values.vminsert.updateStrategy }} + strategy: {{- toYaml .Values.vminsert.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" (dict "values" (list .Values.vminsert.podLabels .Values.commonLabels) "context" .) 
}} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vminsert + template: + metadata: + {{- if .Values.vminsert.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + spec: + serviceAccountName: {{ template "victoriametrics.vminsert.serviceAccountName" . }} + {{- include "victoriametrics.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.vminsert.automountServiceAccountToken }} + {{- if .Values.vminsert.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vminsert.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.vminsert.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vminsert.podAffinityPreset "component" "vminsert" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vminsert.podAntiAffinityPreset "component" "vminsert" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.vminsert.nodeAffinityPreset.type "key" .Values.vminsert.nodeAffinityPreset.key "values" .Values.vminsert.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.vminsert.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.vminsert.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vminsert.tolerations 
}} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vminsert.priorityClassName }} + priorityClassName: {{ .Values.vminsert.priorityClassName | quote }} + {{- end }} + {{- if .Values.vminsert.schedulerName }} + schedulerName: {{ .Values.vminsert.schedulerName | quote }} + {{- end }} + {{- if .Values.vminsert.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vminsert.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vminsert.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vminsert.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.vminsert.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.vminsert.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: vminsert + image: {{ template "victoriametrics.vminsert.image" . 
}} + imagePullPolicy: {{ .Values.vminsert.image.pullPolicy }} + {{- if .Values.vminsert.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vminsert.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vminsert.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vminsert.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.args "context" $) | nindent 12 }} + {{- else }} + args: + - vminsert + - --replicationFactor=2 + {{- if .Values.envflagEnable }} + - --envflag.enable + - --envflag.prefix={{ .Values.envflagPrefix }} + {{- end }} + - --loggerFormat={{ .Values.loggerFormat }} + - --httpListenAddr=:{{ .Values.vminsert.containerPorts.http }} + {{- range $e, $i := until (int .Values.vmstorage.replicaCount) }} + - --storageNode={{ include "victoriametrics.vmstorage.fullname" $ }}-{{ int $i }}.{{ printf "%s-headless" (include "victoriametrics.vmstorage.fullname" $) | trunc 63 | trimSuffix "-" }}:{{ $.Values.vmstorage.containerPorts.vminsert }} + {{- end }} + {{- if .Values.vminsert.extraArgs }} + {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.extraArgs "context" $) | nindent 12 }} + {{- end }} + {{- end }} + env: + {{- if .Values.vminsert.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.vminsert.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vminsert.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.vminsert.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vminsert.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.vminsert.resources }} + 
resources: {{- toYaml .Values.vminsert.resources | nindent 12 }} + {{- else if ne .Values.vminsert.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.vminsert.resourcesPreset) | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.vminsert.containerPorts.http }} + {{- if .Values.vminsert.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.vminsert.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vminsert.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http + {{- end }} + {{- if .Values.vminsert.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.vminsert.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vminsert.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vminsert.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.vminsert.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vminsert.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vminsert.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vminsert.extraVolumeMounts }} + volumeMounts: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.extraVolumeMounts "context" $) | nindent 12 
}} + {{- end }} + {{- if .Values.vminsert.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vminsert.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vminsert.extraVolumes }} + volumes: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.extraVolumes "context" $) | nindent 8 }} + {{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/hpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/hpa.yaml new file mode 100644 index 00000000..54611f67 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/hpa.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vminsert.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" (dict "context" $) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vminsert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "victoriametrics.vminsert.fullname" . 
}} + minReplicas: {{ .Values.vminsert.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.vminsert.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.vminsert.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.vminsert.autoscaling.hpa.targetMemory }} + {{- end }} + {{- if .Values.vminsert.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vminsert.autoscaling.hpa.targetCPU }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress-tls-secret.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress-tls-secret.yaml new file mode 100644 index 00000000..261aafe4 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress-tls-secret.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vminsert.ingress.enabled }} +{{- if .Values.vminsert.ingress.secrets }} +{{- range .Values.vminsert.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" $.Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.vminsert.ingress.tls .Values.vminsert.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.vminsert.ingress.hostname }} +{{- $ca := genCA "vminsert-ca" 365 }} +{{- $cert := genSignedCert .Values.vminsert.ingress.hostname nil (list .Values.vminsert.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress.yaml new file mode 100644 index 00000000..32cecc8f --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/ingress.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vminsert.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ template "victoriametrics.vminsert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if or .Values.vminsert.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vminsert.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vminsert.ingress.ingressClassName }} + ingressClassName: {{ .Values.vminsert.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.vminsert.ingress.hostname }} + - host: {{ .Values.vminsert.ingress.hostname }} + http: + paths: + {{- if .Values.vminsert.ingress.extraPaths }} + {{- toYaml .Values.vminsert.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.vminsert.ingress.path }} + pathType: {{ .Values.vminsert.ingress.pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "victoriametrics.vminsert.fullname" . | trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.vminsert.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + pathType: {{ default "ImplementationSpecific" .pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "victoriametrics.vminsert.fullname" $ | trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.vminsert.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.vminsert.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.vminsert.ingress.annotations )) .Values.vminsert.ingress.selfSigned)) .Values.vminsert.ingress.extraTls }} + tls: + {{- if and .Values.vminsert.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.vminsert.ingress.annotations )) .Values.vminsert.ingress.selfSigned) }} + - hosts: + - {{ .Values.vminsert.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.vminsert.ingress.hostname }} + {{- end }} + {{- if 
.Values.vminsert.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/networkpolicy.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/networkpolicy.yaml new file mode 100644 index 00000000..11c01b23 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/networkpolicy.yaml @@ -0,0 +1,79 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vminsert.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "victoriametrics.vminsert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + - Egress + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vminsert.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if .Values.vminsert.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to victoriametrics storage controller + - ports: + - port: {{ .Values.vmstorage.containerPorts.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if .Values.vminsert.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vminsert.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.vminsert.containerPorts.http }} + {{- if not .Values.vminsert.networkPolicy.allowExternal }} + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.vminsert.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.vminsert.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.vminsert.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.vminsert.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.vminsert.networkPolicy.extraIngress }} + {{- include 
"common.tplvalues.render" ( dict "value" .Values.vminsert.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/pdb.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/pdb.yaml new file mode 100644 index 00000000..27b94864 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vminsert.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "victoriametrics.vminsert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vminsert.pdb.minAvailable }} + minAvailable: {{ .Values.vminsert.pdb.minAvailable }} + {{- end }} + {{- if or .Values.vminsert.pdb.maxUnavailable ( not .Values.vminsert.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.vminsert.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vminsert.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vminsert +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service-account.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service-account.yaml new file mode 100644 index 00000000..ce874398 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service-account.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vminsert.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "victoriametrics.vminsert.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if or .Values.vminsert.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vminsert.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.vminsert.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service.yaml new file mode 100644 index 00000000..22412e0d --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/service.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "victoriametrics.vminsert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if or .Values.vminsert.service.annotations .Values.commonAnnotations .Values.vminsert.metrics.enabled .Values.vminsert.metrics.annotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vminsert.service.annotations .Values.commonAnnotations .Values.vminsert.metrics.annotations) "context" .) }} + {{- if .Values.vminsert.metrics.enabled }} + {{- $defaultMetricsAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" .Values.vminsert.service.ports.http "prometheus.io/path" "/metrics" }} + {{- $annotations = include "common.tplvalues.merge" (dict "values" (list $annotations $defaultMetricsAnnotations) "context" .) 
}} + {{- end }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.vminsert.service.type }} + {{- if and .Values.vminsert.service.clusterIP (eq .Values.vminsert.service.type "ClusterIP") }} + clusterIP: {{ .Values.vminsert.service.clusterIP }} + {{- end }} + {{- if .Values.vminsert.service.sessionAffinity }} + sessionAffinity: {{ .Values.vminsert.service.sessionAffinity }} + {{- end }} + {{- if .Values.vminsert.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.vminsert.service.type "LoadBalancer") (eq .Values.vminsert.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.vminsert.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.vminsert.service.type "LoadBalancer") (not (empty .Values.vminsert.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.vminsert.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.vminsert.service.type "LoadBalancer") (not (empty .Values.vminsert.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.vminsert.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.vminsert.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.vminsert.service.type "NodePort") (eq .Values.vminsert.service.type "LoadBalancer")) (not (empty .Values.vminsert.service.nodePorts.http)) }} + nodePort: {{ .Values.vminsert.service.nodePorts.http }} + {{- else if eq .Values.vminsert.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: http + {{- if .Values.vminsert.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include 
"common.tplvalues.merge" ( dict "values" ( list .Values.vminsert.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vminsert diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/servicemonitor.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/servicemonitor.yaml new file mode 100644 index 00000000..4284fa93 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vminsert.metrics.enabled .Values.vminsert.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "victoriametrics.vminsert.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.vminsert.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if or .Values.vminsert.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vminsert.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.vminsert.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vminsert + {{- if .Values.vminsert.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.vminsert.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.vminsert.metrics.serviceMonitor.interval }} + interval: {{ .Values.vminsert.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.vminsert.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.vminsert.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.vminsert.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.vminsert.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.vminsert.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vminsert.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vminsert.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vminsert.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/vpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/vpa.yaml new file mode 100644 index 00000000..d6e07f71 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vminsert/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.vminsert.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" (dict "context" $) }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vminsert.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vminsert + {{- if or .Values.vminsert.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vminsert.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: vminsert + {{- with .Values.vminsert.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vminsert.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vminsert.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . 
}} + kind: Deployment + name: {{ include "victoriametrics.vminsert.fullname" . }} + {{- if .Values.vminsert.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.vminsert.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/dep-sts.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/dep-sts.yaml new file mode 100644 index 00000000..896a6ad3 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/dep-sts.yaml @@ -0,0 +1,178 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- $kind := lower .Values.vmselect.kind }} +{{- if eq $kind "statefulset" }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +{{- else }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "victoriametrics.vmselect.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if or .Values.vmselect.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmselect.annotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.vmselect.autoscaling.hpa.enabled }} + replicas: {{ .Values.vmselect.replicaCount }} + {{- end }} + {{- if eq $kind "statefulset" }} + serviceName: {{ printf "%s-headless" (include "victoriametrics.vmstorage.fullname" .) 
| trunc 63 | trimSuffix "-" }} + podManagementPolicy: {{ .Values.vmselect.podManagementPolicy }} + {{- end }} + {{- if .Values.vmselect.updateStrategy }} + {{ ternary "strategy" "updateStrategy" (eq $kind "deployment")}}: {{- toYaml .Values.vmselect.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" (dict "values" (list .Values.vmselect.podLabels .Values.commonLabels) "context" .) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmselect + template: + metadata: + {{- if .Values.vmselect.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + spec: + serviceAccountName: {{ template "victoriametrics.vmselect.serviceAccountName" . }} + {{- include "victoriametrics.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.vmselect.automountServiceAccountToken }} + {{- if .Values.vmselect.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmselect.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.vmselect.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmselect.podAffinityPreset "component" "vmselect" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmselect.podAntiAffinityPreset "component" "vmselect" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.vmselect.nodeAffinityPreset.type "key" .Values.vmselect.nodeAffinityPreset.key "values" .Values.vmselect.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.vmselect.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.vmselect.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmselect.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmselect.priorityClassName }} + priorityClassName: {{ .Values.vmselect.priorityClassName | quote }} + {{- end }} + {{- if .Values.vmselect.schedulerName }} + schedulerName: {{ .Values.vmselect.schedulerName | quote }} + {{- end }} + {{- if .Values.vmselect.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.vmselect.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmselect.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmselect.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.vmselect.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.vmselect.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: vmselect + image: {{ template "victoriametrics.vmselect.image" . }} + imagePullPolicy: {{ .Values.vmselect.image.pullPolicy }} + {{- if .Values.vmselect.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmselect.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmselect.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmselect.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.args "context" $) | nindent 12 }} + {{- else }} + args: + - vmselect + - --replicationFactor=2 + - --dedup.minScrapeInterval=1ms + {{- if .Values.envflagEnable }} + - --envflag.enable + - --envflag.prefix={{ .Values.envflagPrefix }} + {{- end }} + - --loggerFormat={{ .Values.loggerFormat }} + - --cacheDataPath=/opt/drycc/cache + - --httpListenAddr=:{{ .Values.vmselect.containerPorts.http }} + {{- range $e, $i := until (int .Values.vmstorage.replicaCount) }} + - --storageNode={{ include "victoriametrics.vmstorage.fullname" $ }}-{{ int $i }}.{{ printf "%s-headless" (include "victoriametrics.vmstorage.fullname" $) | trunc 63 | trimSuffix "-" }}:{{ $.Values.vmstorage.containerPorts.vmselect }} + {{- end }} + {{- if 
.Values.vmselect.extraArgs }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.extraArgs "context" $) | nindent 12 }} + {{- end }} + {{- end }} + env: + {{- if .Values.vmselect.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.vmselect.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vmselect.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.vmselect.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vmselect.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.vmselect.resources }} + resources: {{- toYaml .Values.vmselect.resources | nindent 12 }} + {{- else if ne .Values.vmselect.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.vmselect.resourcesPreset) | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.vmselect.containerPorts.http }} + {{- if .Values.vmselect.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmselect.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmselect.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http + {{- end }} + {{- if .Values.vmselect.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmselect.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmselect.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if 
.Values.vmselect.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.vmselect.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmselect.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmselect.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /opt/drycc/cache + subPath: app-cache-dir + {{- if .Values.vmselect.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmselect.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmselect.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if .Values.vmselect.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.extraVolumes "context" $) | nindent 8 }} + {{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/headless-service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/headless-service.yaml new file mode 100644 index 00000000..68ce140f --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/headless-service.yaml @@ -0,0 +1,30 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (eq .Values.vmselect.kind "statefulset") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "victoriametrics.vmselect.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if or .Values.commonAnnotations .Values.vmselect.service.headless.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: http + port: {{ .Values.vmselect.containerPorts.http }} + targetPort: http + protocol: TCP + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vmselect +{{- end }} \ No newline at end of file diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/hpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/hpa.yaml new file mode 100644 index 00000000..aad19d9f --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/hpa.yaml @@ -0,0 +1,47 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmselect.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" (dict "context" $) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmselect.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + {{- if eq .Values.vmselect.kind "statefulset" }} + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + {{- else }} + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + {{- end }} + name: {{ include "victoriametrics.vmselect.fullname" . }} + minReplicas: {{ .Values.vmselect.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.vmselect.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.vmselect.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.vmselect.autoscaling.hpa.targetMemory }} + {{- end }} + {{- if .Values.vmselect.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vmselect.autoscaling.hpa.targetCPU }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress-tls-secret.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress-tls-secret.yaml new file mode 100644 index 00000000..afd30458 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress-tls-secret.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmselect.ingress.enabled }} +{{- if .Values.vmselect.ingress.secrets }} +{{- range .Values.vmselect.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" $.Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.vmselect.ingress.tls .Values.vmselect.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.vmselect.ingress.hostname }} +{{- $ca := genCA "vmselect-ca" 365 }} +{{- $cert := genSignedCert .Values.vmselect.ingress.hostname nil (list .Values.vmselect.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress.yaml new file mode 100644 index 00000000..5a068658 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/ingress.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmselect.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ template "victoriametrics.vmselect.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if or .Values.vmselect.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vmselect.ingress.ingressClassName }} + ingressClassName: {{ .Values.vmselect.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.vmselect.ingress.hostname }} + - host: {{ .Values.vmselect.ingress.hostname }} + http: + paths: + {{- if .Values.vmselect.ingress.extraPaths }} + {{- toYaml .Values.vmselect.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.vmselect.ingress.path }} + pathType: {{ .Values.vmselect.ingress.pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "victoriametrics.vmselect.fullname" . | trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.vmselect.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + pathType: {{ default "ImplementationSpecific" .pathType }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "victoriametrics.vmselect.fullname" $ | trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.vmselect.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.vmselect.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.vmselect.ingress.annotations )) .Values.vmselect.ingress.selfSigned)) .Values.vmselect.ingress.extraTls }} + tls: + {{- if and .Values.vmselect.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.vmselect.ingress.annotations )) .Values.vmselect.ingress.selfSigned) }} + - hosts: + - {{ .Values.vmselect.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.vmselect.ingress.hostname }} + {{- end }} + {{- if 
.Values.vmselect.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/networkpolicy.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/networkpolicy.yaml new file mode 100644 index 00000000..a1814be3 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/networkpolicy.yaml @@ -0,0 +1,79 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmselect.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "victoriametrics.vmselect.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + - Egress + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if .Values.vmselect.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to victoriametrics storage controller + - ports: + - port: {{ .Values.vmstorage.containerPorts.http }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if .Values.vmselect.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmselect.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.vmselect.containerPorts.http }} + {{- if not .Values.vmselect.networkPolicy.allowExternal }} + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.vmselect.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.vmselect.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.vmselect.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.vmselect.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.vmselect.networkPolicy.extraIngress }} + {{- include 
"common.tplvalues.render" ( dict "value" .Values.vmselect.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/pdb.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/pdb.yaml new file mode 100644 index 00000000..cd08d62e --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmselect.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "victoriametrics.vmselect.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vmselect.pdb.minAvailable }} + minAvailable: {{ .Values.vmselect.pdb.minAvailable }} + {{- end }} + {{- if or .Values.vmselect.pdb.maxUnavailable ( not .Values.vmselect.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.vmselect.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmselect +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service-account.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service-account.yaml new file mode 100644 index 00000000..3a5d6cf7 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service-account.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmselect.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "victoriametrics.vmselect.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if or .Values.vmselect.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.vmselect.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service.yaml new file mode 100644 index 00000000..0bb2407a --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/service.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "victoriametrics.vmselect.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if or .Values.vmselect.service.annotations .Values.commonAnnotations .Values.vmselect.metrics.enabled .Values.vmselect.metrics.annotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmselect.service.annotations .Values.commonAnnotations .Values.vmselect.metrics.annotations) "context" .) }} + {{- if .Values.vmselect.metrics.enabled }} + {{- $defaultMetricsAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" .Values.vmselect.service.ports.http "prometheus.io/path" "/metrics" }} + {{- $annotations = include "common.tplvalues.merge" (dict "values" (list $annotations $defaultMetricsAnnotations) "context" .) 
}} + {{- end }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.vmselect.service.type }} + {{- if and .Values.vmselect.service.clusterIP (eq .Values.vmselect.service.type "ClusterIP") }} + clusterIP: {{ .Values.vmselect.service.clusterIP }} + {{- end }} + {{- if .Values.vmselect.service.sessionAffinity }} + sessionAffinity: {{ .Values.vmselect.service.sessionAffinity }} + {{- end }} + {{- if .Values.vmselect.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.vmselect.service.type "LoadBalancer") (eq .Values.vmselect.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.vmselect.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.vmselect.service.type "LoadBalancer") (not (empty .Values.vmselect.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.vmselect.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.vmselect.service.type "LoadBalancer") (not (empty .Values.vmselect.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.vmselect.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.vmselect.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.vmselect.service.type "NodePort") (eq .Values.vmselect.service.type "LoadBalancer")) (not (empty .Values.vmselect.service.nodePorts.http)) }} + nodePort: {{ .Values.vmselect.service.nodePorts.http }} + {{- else if eq .Values.vmselect.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: http + {{- if .Values.vmselect.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include 
"common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vmselect diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/servicemonitor.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/servicemonitor.yaml new file mode 100644 index 00000000..d5f8ffdd --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmselect.metrics.enabled .Values.vmselect.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "victoriametrics.vmselect.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.vmselect.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if or .Values.vmselect.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.vmselect.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmselect + {{- if .Values.vmselect.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmselect.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.vmselect.metrics.serviceMonitor.interval }} + interval: {{ .Values.vmselect.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.vmselect.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.vmselect.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.vmselect.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.vmselect.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.vmselect.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmselect.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmselect.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmselect.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/vpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/vpa.yaml new file mode 100644 index 00000000..2f4cedc6 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmselect/vpa.yaml @@ -0,0 +1,50 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.vmselect.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" (dict "context" $) }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmselect.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmselect + {{- if or .Values.vmselect.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmselect.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: vmselect + {{- with .Values.vmselect.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmselect.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmselect.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . 
| nindent 8 }} + {{- end }} + targetRef: + {{- if eq .Values.vmselect.kind "statefulset" }} + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + {{- else }} + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + {{- end }} + name: {{ include "victoriametrics.vmselect.fullname" . }} + {{- if .Values.vmselect.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.vmselect.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/headless-service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/headless-service.yaml new file mode 100644 index 00000000..78013fdd --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/headless-service.yaml @@ -0,0 +1,36 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "victoriametrics.vmstorage.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if or .Values.commonAnnotations .Values.vmstorage.service.headless.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.service.headless.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: http + port: {{ .Values.vmstorage.containerPorts.http }} + targetPort: http + protocol: TCP + - name: tcp-vminsert + port: {{ .Values.vmstorage.containerPorts.vminsert }} + targetPort: tcp-vminsert + protocol: TCP + - name: tcp-vmselect + port: {{ .Values.vmstorage.containerPorts.vmselect }} + targetPort: tcp-vmselect + protocol: TCP + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vmstorage diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/hpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/hpa.yaml new file mode 100644 index 00000000..83b4eae8 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/hpa.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmstorage.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" (dict "context" $) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmstorage.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . 
}} + kind: StatefulSet + name: {{ include "victoriametrics.vmstorage.fullname" . }} + minReplicas: {{ .Values.vmstorage.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.vmstorage.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.vmstorage.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.vmstorage.autoscaling.hpa.targetMemory }} + {{- end }} + {{- if .Values.vmstorage.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vmstorage.autoscaling.hpa.targetCPU }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/networkpolicy.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/networkpolicy.yaml new file mode 100644 index 00000000..684c9c5a --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/networkpolicy.yaml @@ -0,0 +1,73 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmstorage.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "victoriametrics.vmstorage.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + - Egress + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if .Values.vmstorage.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + {{- if .Values.vmstorage.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmstorage.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.vmstorage.containerPorts.http }} + - port: {{ .Values.vmstorage.containerPorts.vminsert }} + - port: {{ .Values.vmstorage.containerPorts.vmselect }} + {{- if not .Values.vmstorage.networkPolicy.allowExternal }} + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.vmstorage.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.vmstorage.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.vmstorage.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.vmstorage.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.vmstorage.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmstorage.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/pdb.yaml 
b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/pdb.yaml new file mode 100644 index 00000000..f85245a5 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/pdb.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmstorage.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "victoriametrics.vmstorage.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.vmstorage.pdb.minAvailable }} + minAvailable: {{ .Values.vmstorage.pdb.minAvailable }} + {{- end }} + {{- if or .Values.vmstorage.pdb.maxUnavailable ( not .Values.vmstorage.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.vmstorage.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmstorage +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service-account.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service-account.yaml new file mode 100644 index 00000000..3d7867c8 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service-account.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.vmstorage.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "victoriametrics.vmstorage.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if or .Values.vmstorage.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.vmstorage.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service.yaml new file mode 100644 index 00000000..40abf629 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/service.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "victoriametrics.vmstorage.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if or .Values.vmstorage.service.annotations .Values.commonAnnotations .Values.vmstorage.metrics.enabled .Values.vmstorage.metrics.annotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmstorage.service.annotations .Values.commonAnnotations .Values.vmstorage.metrics.annotations) "context" .) }} + {{- if .Values.vmstorage.metrics.enabled }} + {{- $defaultMetricsAnnotations := dict "prometheus.io/scrape" "true" "prometheus.io/port" (.Values.vmstorage.service.ports.http ) "prometheus.io/path" "/metrics" }} + {{- $annotations = include "common.tplvalues.merge" (dict "values" (list $annotations $defaultMetricsAnnotations) "context" .) 
}} + {{- end }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.vmstorage.service.type }} + {{- if and .Values.vmstorage.service.clusterIP (eq .Values.vmstorage.service.type "ClusterIP") }} + clusterIP: {{ .Values.vmstorage.service.clusterIP }} + {{- end }} + {{- if .Values.vmstorage.service.sessionAffinity }} + sessionAffinity: {{ .Values.vmstorage.service.sessionAffinity }} + {{- end }} + {{- if .Values.vmstorage.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.vmstorage.service.type "LoadBalancer") (eq .Values.vmstorage.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.vmstorage.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.vmstorage.service.type "LoadBalancer") (not (empty .Values.vmstorage.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.vmstorage.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.vmstorage.service.type "LoadBalancer") (not (empty .Values.vmstorage.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.vmstorage.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.vmstorage.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.vmstorage.service.type "NodePort") (eq .Values.vmstorage.service.type "LoadBalancer")) (not (empty .Values.vmstorage.service.nodePorts.http)) }} + nodePort: {{ .Values.vmstorage.service.nodePorts.http }} + {{- else if eq .Values.vmstorage.service.type "ClusterIP" }} + nodePort: null + {{- end }} + targetPort: http + {{- if .Values.vmstorage.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := 
include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: vmstorage diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/servicemonitor.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/servicemonitor.yaml new file mode 100644 index 00000000..63ea54c1 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.vmstorage.metrics.enabled .Values.vmstorage.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "victoriametrics.vmstorage.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.vmstorage.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if or .Values.vmstorage.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.vmstorage.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: vmstorage + {{- if .Values.vmstorage.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.vmstorage.metrics.serviceMonitor.interval }} + interval: {{ .Values.vmstorage.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.vmstorage.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.vmstorage.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.vmstorage.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.vmstorage.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.vmstorage.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmstorage.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmstorage.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.vmstorage.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/statefulset.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/statefulset.yaml new file mode 100644 index 00000000..40f409ec --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/statefulset.yaml @@ -0,0 +1,225 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "victoriametrics.vmstorage.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if or .Values.vmstorage.statefulsetAnnotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.vmstorage.statefulsetAnnotations .Values.commonAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.vmstorage.autoscaling.hpa.enabled }} + replicas: {{ .Values.vmstorage.replicaCount }} + {{- end }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: Delete + whenScaled: Retain + serviceName: {{ printf "%s-headless" (include "victoriametrics.vmstorage.fullname" .) | trunc 63 | trimSuffix "-" }} + podManagementPolicy: {{ .Values.vmstorage.podManagementPolicy }} + {{- if .Values.vmstorage.updateStrategy }} + updateStrategy: {{- toYaml .Values.vmstorage.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + template: + metadata: + {{- if .Values.vmstorage.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + spec: + serviceAccountName: {{ template "victoriametrics.vmstorage.serviceAccountName" . }} + {{- include "victoriametrics.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.vmstorage.automountServiceAccountToken }} + {{- if .Values.vmstorage.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmstorage.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.vmstorage.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmstorage.podAffinityPreset "component" "vmstorage" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.vmstorage.podAntiAffinityPreset "component" "vmstorage" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.vmstorage.nodeAffinityPreset.type "key" .Values.vmstorage.nodeAffinityPreset.key "values" .Values.vmstorage.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.vmstorage.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.vmstorage.nodeSelector "context" $) | 
nindent 8 }} + {{- end }} + {{- if .Values.vmstorage.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmstorage.priorityClassName }} + priorityClassName: {{ .Values.vmstorage.priorityClassName | quote }} + {{- end }} + {{- if .Values.vmstorage.schedulerName }} + schedulerName: {{ .Values.vmstorage.schedulerName | quote }} + {{- end }} + {{- if .Values.vmstorage.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.vmstorage.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmstorage.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.vmstorage.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.vmstorage.terminationGracePeriodSeconds }} + {{- end }} + {{- if or (and .Values.defaultInitContainers.volumePermissions.enabled .Values.vmstorage.persistence.enabled) .Values.vmstorage.initContainers }} + initContainers: + {{- if and .Values.defaultInitContainers.volumePermissions.enabled .Values.vmstorage.persistence.enabled }} + {{- include "victoriametrics.init-containers.volume-permissions" (dict "context" . "componentValues" .Values.vmstorage) | nindent 8 }} + {{- end }} + {{- if .Values.vmstorage.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: vmstorage + image: {{ template "victoriametrics.vmstorage.image" . 
}} + imagePullPolicy: {{ .Values.vmstorage.image.pullPolicy }} + {{- if .Values.vmstorage.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.vmstorage.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmstorage.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmstorage.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.args "context" $) | nindent 12 }} + {{- else }} + args: + - vmstorage + {{- if .Values.envflagEnable }} + - --envflag.enable + - --envflag.prefix={{ .Values.envflagPrefix }} + {{- end }} + - --loggerFormat={{ .Values.loggerFormat }} + - --httpListenAddr=:{{ .Values.vmstorage.containerPorts.http }} + - --vminsertAddr=:{{ .Values.vmstorage.containerPorts.vminsert }} + - --vmselectAddr=:{{ .Values.vmstorage.containerPorts.vmselect }} + - --retentionPeriod={{ .Values.vmstorage.retentionPeriod }} + - --storageDataPath={{ .Values.vmstorage.persistence.mountPath }} + {{- if .Values.vmstorage.extraArgs }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.extraArgs "context" $) | nindent 12 }} + {{- end }} + {{- end }} + env: + {{- if .Values.vmstorage.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.vmstorage.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vmstorage.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.vmstorage.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.vmstorage.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.vmstorage.resources }} + resources: {{- toYaml .Values.vmstorage.resources | nindent 12 }} 
+ {{- else if ne .Values.vmstorage.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.vmstorage.resourcesPreset) | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.vmstorage.containerPorts.http }} + - name: tcp-vminsert + containerPort: {{ .Values.vmstorage.containerPorts.vminsert }} + - name: tcp-vmselect + containerPort: {{ .Values.vmstorage.containerPorts.vmselect }} + {{- if .Values.vmstorage.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmstorage.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmstorage.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http + {{- end }} + {{- if .Values.vmstorage.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.vmstorage.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmstorage.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmstorage.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.vmstorage.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.vmstorage.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.vmstorage.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if 
.Values.vmstorage.persistence.enabled }} + - name: data + mountPath: {{ .Values.vmstorage.persistence.mountPath }} + {{- end }} + {{- if .Values.vmstorage.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.vmstorage.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.vmstorage.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if and .Values.vmstorage.persistence.enabled .Values.vmstorage.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ .Values.vmstorage.persistence.existingClaim }} + {{- else if (not .Values.vmstorage.persistence.enabled) }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.vmstorage.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.vmstorage.persistence.enabled (not .Values.vmstorage.persistence.existingClaim) }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + annotations: + {{- if .Values.vmstorage.persistence.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + spec: + {{- if .Values.vmstorage.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.persistence.dataSource "context" $) | nindent 4 }} + {{- end }} + accessModes: + {{- range .Values.vmstorage.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.vmstorage.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.vmstorage.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.vmstorage.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.vmstorage.persistence.extraVolumeClaimTemplates }} + {{- include "common.tplvalues.render" (dict "value" .Values.vmstorage.persistence.extraVolumeClaimTemplates "context" $) | nindent 4 }} + {{- end }} + {{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/vpa.yaml b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/vpa.yaml new file mode 100644 index 00000000..b88c0b75 --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/templates/vmstorage/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.vmstorage.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" (dict "context" $) }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "victoriametrics.vmstorage.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: victoriametrics + app.kubernetes.io/component: vmstorage + {{- if or .Values.vmstorage.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.vmstorage.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: vmstorage + {{- with .Values.vmstorage.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmstorage.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.vmstorage.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "victoriametrics.vmstorage.fullname" . }} + {{- if .Values.vmstorage.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.vmstorage.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/victoriametrics/1/chart/victoriametrics/values.yaml b/addons/victoriametrics/1/chart/victoriametrics/values.yaml new file mode 100644 index 00000000..225c4b3c --- /dev/null +++ b/addons/victoriametrics/1/chart/victoriametrics/values.yaml @@ -0,0 +1,3670 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: true + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param apiVersions Override Kubernetes API versions reported by .Capabilities +## +apiVersions: [] +## @param nameOverride String to partially override common.names.name +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## @section VictoriaMetrics common parameters +## +## @param envflagEnable Enable envflag +## +envflagEnable: true +## @param envflagPrefix Prefix used for the envflag entries +## +envflagPrefix: VM_ +## @param loggerFormat Set format of the logs +## 
+loggerFormat: json + +## @section VictoriaMetrics Select Parameters +## +vmselect: + ## Drycc VictoriaMetrics Select image + ## ref: https://hub.docker.com/r/drycc/victoriametrics-vmselect/tags/ + ## @param vmselect.image.registry [default: REGISTRY_NAME] VictoriaMetrics Select image registry + ## @param vmselect.image.repository [default: REPOSITORY_NAME/victoriametrics-vmselect] VictoriaMetrics Select image repository + ## @skip vmselect.image.tag VictoriaMetrics Select image tag (immutable tags are recommended) + ## @param vmselect.image.digest VictoriaMetrics Select image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param vmselect.image.pullPolicy VictoriaMetrics Select image pull policy + ## @param vmselect.image.pullSecrets VictoriaMetrics Select image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/victoriametrics + tag: 0.0.1 + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param vmselect.replicaCount Number of VictoriaMetrics Select replicas to deploy + ## + replicaCount: 2 + ## @param vmselect.containerPorts.http VictoriaMetrics Select http container port + ## + containerPorts: + http: 8481 + ## Configure extra options for VictoriaMetrics Select containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param vmselect.livenessProbe.enabled Enable livenessProbe on VictoriaMetrics Select containers + ## @param vmselect.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param vmselect.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param vmselect.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param vmselect.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param vmselect.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmselect.readinessProbe.enabled Enable readinessProbe on VictoriaMetrics Select containers + ## @param vmselect.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param vmselect.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param vmselect.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param vmselect.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param vmselect.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + 
successThreshold: 1 + ## @param vmselect.startupProbe.enabled Enable startupProbe on VictoriaMetrics Select containers + ## @param vmselect.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param vmselect.startupProbe.periodSeconds Period seconds for startupProbe + ## @param vmselect.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param vmselect.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param vmselect.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmselect.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param vmselect.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param vmselect.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## VictoriaMetrics Select resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param vmselect.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmselect.resources is set (vmselect.resources is recommended for production). 
+ ## More information: https://github.com/drycc/charts/blob/main/drycc/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param vmselect.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param vmselect.kind Define how to deploy VictoriaMetrics Select (allowed values: deployment or statefulset) + ## + kind: "deployment" + ## @param vmselect.podManagementPolicy Pod management policy for VictoriaMetrics Storage statefulset + ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: Parallel + ## @param vmselect.annotations Annotations for VictoriaMetrics Select Deployment or StatefulSet + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param vmselect.podSecurityContext.enabled Enabled VictoriaMetrics Select pods' Security Context + ## @param vmselect.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param vmselect.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param vmselect.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param vmselect.podSecurityContext.fsGroup Set VictoriaMetrics Select pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param 
vmselect.containerSecurityContext.enabled Enabled VictoriaMetrics Select containers' Security Context + ## @param vmselect.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param vmselect.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param vmselect.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param vmselect.containerSecurityContext.runAsNonRoot Set VictoriaMetrics Select containers' Security Context runAsNonRoot + ## @param vmselect.containerSecurityContext.privileged Set VictoriaMetrics Select containers' Security Context privileged + ## @param vmselect.containerSecurityContext.readOnlyRootFilesystem Set VictoriaMetrics Select containers' Security Context readOnlyRootFilesystem + ## @param vmselect.containerSecurityContext.allowPrivilegeEscalation Set VictoriaMetrics Select container's privilege escalation + ## @param vmselect.containerSecurityContext.capabilities.drop Set VictoriaMetrics Select container's Security Context capabilities to be dropped + ## @param vmselect.containerSecurityContext.seccompProfile.type Set VictoriaMetrics Select container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + ## @param vmselect.command Override default container command (useful when using custom images) + ## + command: [] + ## @param vmselect.args Override default container args (useful when using custom images) + ## + args: [] + ## @param vmselect.extraArgs Add extra arguments to the default command + ## + extraArgs: [] + ## @param vmselect.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param vmselect.hostAliases VictoriaMetrics Select 
pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param vmselect.podLabels Extra labels for VictoriaMetrics Select pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param vmselect.podAnnotations Annotations for VictoriaMetrics Select pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param vmselect.podAffinityPreset Pod affinity preset. Ignored if `vmselect.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param vmselect.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `vmselect.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param vmselect.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param vmselect.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param vmselect.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param vmselect.autoscaling.vpa.enabled Enable VPA for VictoriaMetrics Select pods + ## @param vmselect.autoscaling.vpa.annotations Annotations for VPA resource + ## @param vmselect.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## @param vmselect.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## @param vmselect.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param vmselect.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param vmselect.autoscaling.hpa.enabled Enable HPA for VictoriaMetrics Select pods + ## @param vmselect.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param vmselect.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param vmselect.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param vmselect.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Node vmselect.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param vmselect.nodeAffinityPreset.type Node affinity preset type. Ignored if `vmselect.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param vmselect.nodeAffinityPreset.key Node label key to match. Ignored if `vmselect.affinity` is set + ## + key: "" + ## @param vmselect.nodeAffinityPreset.values Node label values to match. Ignored if `vmselect.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param vmselect.affinity Affinity for VictoriaMetrics Select pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `vmselect.podAffinityPreset`, `vmselect.podAntiAffinityPreset`, and `vmselect.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param vmselect.nodeSelector Node labels for VictoriaMetrics Select pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param vmselect.tolerations Tolerations for VictoriaMetrics Select pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param vmselect.updateStrategy.type VictoriaMetrics Select statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param vmselect.priorityClassName VictoriaMetrics Select pods' priorityClassName + ## + priorityClassName: "" + ## @param vmselect.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param vmselect.schedulerName Name of the k8s scheduler (other than default) for VictoriaMetrics Select pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param vmselect.terminationGracePeriodSeconds Seconds VictoriaMetrics Select pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param vmselect.lifecycleHooks for the VictoriaMetrics Select container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param vmselect.extraEnvVars Array with extra environment variables to add to VictoriaMetrics Select nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param vmselect.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for VictoriaMetrics Select nodes + ## + extraEnvVarsCM: "" + ## @param vmselect.extraEnvVarsSecret Name of existing Secret containing extra env vars for VictoriaMetrics Select nodes + ## + extraEnvVarsSecret: "" + ## @param vmselect.extraVolumes Optionally specify extra list of additional volumes for the VictoriaMetrics Select pod(s) + ## + extraVolumes: [] + ## @param vmselect.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Select container(s) + ## + extraVolumeMounts: [] + ## @param vmselect.sidecars Add additional sidecar containers to the VictoriaMetrics Select pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param vmselect.initContainers Add additional init containers to the VictoriaMetrics Select 
pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## @section VictoriaMetrics Select RBAC Parameters + ## + + ## ServiceAccount configuration + ## + serviceAccount: + ## @param vmselect.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param vmselect.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param vmselect.serviceAccount.annotations Additional Service Account annotations (evaluated as a template) + ## + annotations: {} + ## @param vmselect.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: false + ## @section VictoriaMetrics Select Traffic Exposure Parameters + ## + service: + ## @param vmselect.service.type VictoriaMetrics Select service type + ## + type: ClusterIP + ## @param vmselect.service.ports.http VictoriaMetrics Select service http port + ## + ports: + http: "8481" + ## Node ports to expose + ## @param vmselect.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + ## @param vmselect.service.clusterIP VictoriaMetrics Select service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param vmselect.service.loadBalancerIP VictoriaMetrics Select service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param vmselect.service.loadBalancerSourceRanges VictoriaMetrics Select service Load Balancer sources + ## ref: 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param vmselect.service.externalTrafficPolicy VictoriaMetrics Select service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param vmselect.service.annotations [object] Additional custom annotations for VictoriaMetrics Select service + ## + annotations: {} + ## @param vmselect.service.extraPorts Extra ports to expose in VictoriaMetrics Select service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param vmselect.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param vmselect.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param vmselect.service.headless.annotations Annotations for the headless service. 
+ ## + annotations: {} + ## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/ + ## + ingress: + ## @param vmselect.ingress.enabled Enable ingress record generation for VictoriaMetrics Select + ## + enabled: false + ## @param vmselect.ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param vmselect.ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param vmselect.ingress.hostname Default host for the ingress record + ## + hostname: vmselect.local + ## @param vmselect.ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param vmselect.ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param vmselect.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param vmselect.ingress.tls Enable TLS configuration for the host defined at `vmselect.ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `vmselect.ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `vmselect.ingress.selfSigned=true` + ## + tls: false + ## @param vmselect.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param vmselect.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: victoriametrics select.local + ## path: / + ## + extraHosts: [] + ## @param vmselect.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param vmselect.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - victoriametrics select.local + ## secretName: victoriametrics select.local-tls + ## + extraTls: [] + ## @param vmselect.ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' 
should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: victoriametrics select.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param vmselect.ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param vmselect.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param vmselect.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + ## @param vmselect.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param vmselect.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param vmselect.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param vmselect.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param vmselect.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + ## @section VictoriaMetrics Select Metrics Parameters + ## + ## Prometheus metrics + ## + metrics: + ## @param vmselect.metrics.enabled Enable the export of Prometheus metrics + ## + enabled: true + ## @param vmselect.metrics.annotations Additional custom annotations for the service + ## + annotations: {} + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param vmselect.metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) + ## + enabled: false + ## @param vmselect.metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param vmselect.metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor + ## + annotations: {} + ## @param vmselect.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param 
vmselect.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + ## @param vmselect.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param vmselect.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param vmselect.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param vmselect.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param vmselect.metrics.serviceMonitor.relabelings Specify general relabeling + ## + relabelings: [] + ## @param vmselect.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/drycc/charts/tree/main/drycc/prometheus-operator#prometheus-configuration + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + +## @section VictoriaMetrics Insert Parameters +## +vminsert: + ## Drycc VictoriaMetrics Insert image + ## ref: https://hub.docker.com/r/drycc/victoriametrics-vminsert/tags/ + ## @param vminsert.image.registry [default: REGISTRY_NAME] VictoriaMetrics Insert image registry + ## @param vminsert.image.repository [default: REPOSITORY_NAME/victoriametrics-vminsert] VictoriaMetrics Insert image repository + ## @skip vminsert.image.tag VictoriaMetrics Insert image tag (immutable tags are recommended) + ## @param vminsert.image.digest VictoriaMetrics Insert image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param vminsert.image.pullPolicy VictoriaMetrics Insert image pull policy + ## @param vminsert.image.pullSecrets VictoriaMetrics Insert image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/victoriametrics + tag: 0.0.1 + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param vminsert.replicaCount Number of VictoriaMetrics Insert replicas to deploy + ## + replicaCount: 2 + ## @param vminsert.containerPorts.http VictoriaMetrics Insert http container port + ## + containerPorts: + http: 8480 + ## Configure extra options for VictoriaMetrics Insert containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param vminsert.livenessProbe.enabled Enable livenessProbe on VictoriaMetrics Insert containers + ## @param vminsert.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param vminsert.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param vminsert.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param vminsert.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param vminsert.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vminsert.readinessProbe.enabled Enable readinessProbe 
on VictoriaMetrics Insert containers + ## @param vminsert.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param vminsert.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param vminsert.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param vminsert.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param vminsert.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vminsert.startupProbe.enabled Enable startupProbe on VictoriaMetrics Insert containers + ## @param vminsert.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param vminsert.startupProbe.periodSeconds Period seconds for startupProbe + ## @param vminsert.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param vminsert.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param vminsert.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vminsert.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param vminsert.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param vminsert.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## VictoriaMetrics Insert resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param vminsert.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if vminsert.resources is set (vminsert.resources is recommended for production). + ## More information: https://github.com/drycc/charts/blob/main/drycc/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param vminsert.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param vminsert.deploymentAnnotations Annotations for VictoriaMetrics Insert Deployment + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + deploymentAnnotations: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param vminsert.podSecurityContext.enabled Enabled VictoriaMetrics Insert pods' Security Context + ## @param vminsert.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param vminsert.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param vminsert.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param vminsert.podSecurityContext.fsGroup Set VictoriaMetrics Insert pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param vminsert.containerSecurityContext.enabled Enabled VictoriaMetrics Insert containers' Security Context + ## @param vminsert.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param vminsert.containerSecurityContext.runAsUser Set containers' Security Context runAsUser 
+ ## @param vminsert.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param vminsert.containerSecurityContext.runAsNonRoot Set VictoriaMetrics Insert containers' Security Context runAsNonRoot + ## @param vminsert.containerSecurityContext.privileged Set VictoriaMetrics Insert containers' Security Context privileged + ## @param vminsert.containerSecurityContext.readOnlyRootFilesystem Set VictoriaMetrics Insert containers' Security Context readOnlyRootFilesystem + ## @param vminsert.containerSecurityContext.allowPrivilegeEscalation Set VictoriaMetrics Insert container's privilege escalation + ## @param vminsert.containerSecurityContext.capabilities.drop Set VictoriaMetrics Insert container's Security Context capabilities to be dropped + ## @param vminsert.containerSecurityContext.seccompProfile.type Set VictoriaMetrics Insert container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + ## @param vminsert.command Override default container command (useful when using custom images) + ## + command: [] + ## @param vminsert.args Override default container args (useful when using custom images) + ## + args: [] + ## @param vminsert.extraArgs Add extra arguments to the default command + ## + extraArgs: [] + ## @param vminsert.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param vminsert.hostAliases VictoriaMetrics Insert pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param vminsert.podLabels Extra labels for VictoriaMetrics Insert pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + 
podLabels: {} + ## @param vminsert.podAnnotations Annotations for VictoriaMetrics Insert pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param vminsert.podAffinityPreset Pod affinity preset. Ignored if `vminsert.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param vminsert.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `vminsert.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param vminsert.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param vminsert.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param vminsert.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param vminsert.autoscaling.vpa.enabled Enable VPA for VictoriaMetrics Insert pods + ## @param vminsert.autoscaling.vpa.annotations Annotations for VPA resource + ## @param vminsert.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## @param vminsert.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## @param vminsert.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param vminsert.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param vminsert.autoscaling.hpa.enabled Enable HPA for VictoriaMetrics Insert pods + ## @param vminsert.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param vminsert.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param vminsert.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param vminsert.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Node vminsert.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param vminsert.nodeAffinityPreset.type Node affinity preset type. Ignored if `vminsert.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param vminsert.nodeAffinityPreset.key Node label key to match. Ignored if `vminsert.affinity` is set + ## + key: "" + ## @param vminsert.nodeAffinityPreset.values Node label values to match. Ignored if `vminsert.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param vminsert.affinity Affinity for VictoriaMetrics Insert pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `vminsert.podAffinityPreset`, `vminsert.podAntiAffinityPreset`, and `vminsert.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param vminsert.nodeSelector Node labels for VictoriaMetrics Insert pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param vminsert.tolerations Tolerations for VictoriaMetrics Insert pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param vminsert.updateStrategy.type VictoriaMetrics Insert statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param vminsert.priorityClassName VictoriaMetrics Insert pods' priorityClassName + ## + priorityClassName: "" + ## @param vminsert.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param vminsert.schedulerName Name of the k8s scheduler (other than default) for VictoriaMetrics Insert pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param vminsert.terminationGracePeriodSeconds Seconds VictoriaMetrics Insert pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param vminsert.lifecycleHooks for the VictoriaMetrics Insert container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param vminsert.extraEnvVars Array with extra environment variables to add to VictoriaMetrics Insert nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param vminsert.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for VictoriaMetrics Insert nodes + ## + extraEnvVarsCM: "" + ## @param vminsert.extraEnvVarsSecret Name of existing Secret containing extra env vars for VictoriaMetrics Insert nodes + ## + extraEnvVarsSecret: "" + ## @param vminsert.extraVolumes Optionally specify extra list of additional volumes for the VictoriaMetrics Insert pod(s) + ## + extraVolumes: [] + ## @param vminsert.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Insert container(s) + ## + extraVolumeMounts: [] + ## @param vminsert.sidecars Add additional sidecar containers to the VictoriaMetrics Insert pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param vminsert.initContainers Add additional init containers to the VictoriaMetrics Insert
pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + + ## ServiceAccount configuration + ## + serviceAccount: + ## @param vminsert.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param vminsert.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param vminsert.serviceAccount.annotations Additional Service Account annotations (evaluated as a template) + ## + annotations: {} + ## @param vminsert.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: false + ## @section VictoriaMetrics Insert Traffic Exposure Parameters + ## + service: + ## @param vminsert.service.type VictoriaMetrics Insert service type + ## + type: ClusterIP + ## @param vminsert.service.ports.http VictoriaMetrics Insert service http port + ## + ports: + http: "8480" + ## Node ports to expose + ## @param vminsert.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + ## @param vminsert.service.clusterIP VictoriaMetrics Insert service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param vminsert.service.loadBalancerIP VictoriaMetrics Insert service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param vminsert.service.loadBalancerSourceRanges VictoriaMetrics Insert service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## 
e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param vminsert.service.externalTrafficPolicy VictoriaMetrics Insert service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param vminsert.service.annotations [object] Additional custom annotations for VictoriaMetrics Insert service + ## + annotations: {} + ## @param vminsert.service.extraPorts Extra ports to expose in VictoriaMetrics Insert service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param vminsert.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param vminsert.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/ + ## + ingress: + ## @param vminsert.ingress.enabled Enable ingress record generation for VictoriaMetrics Insert + ## + enabled: false + ## @param vminsert.ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param vminsert.ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param vminsert.ingress.hostname Default host for the ingress record + ## + hostname: vminsert.local + ## @param vminsert.ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param vminsert.ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param vminsert.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param vminsert.ingress.tls Enable TLS configuration for the host defined at `vminsert.ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `vminsert.ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `vminsert.ingress.selfSigned=true` + ## + tls: false + ## @param vminsert.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param vminsert.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: vminsert.local + ## path: / + ## + extraHosts: [] + ## @param vminsert.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param
vminsert.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - vminsert.local + ## secretName: vminsert.local-tls + ## + extraTls: [] + ## @param vminsert.ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: vminsert.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param vminsert.ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param vminsert.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param vminsert.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply.
When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + ## @param vminsert.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param vminsert.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param vminsert.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param vminsert.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param vminsert.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + ## @section VictoriaMetrics Insert Metrics Parameters + ## + ## Prometheus metrics + ## + metrics: + ## @param vminsert.metrics.enabled Enable the export of Prometheus metrics + ## + enabled: true + ## @param vminsert.metrics.annotations Additional custom annotations for the service + ## + annotations: {} + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param vminsert.metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) + ## + 
enabled: false + ## @param vminsert.metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param vminsert.metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor + ## + annotations: {} + ## @param vminsert.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param vminsert.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + ## @param vminsert.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param vminsert.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param vminsert.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param vminsert.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param vminsert.metrics.serviceMonitor.relabelings Specify general relabeling + ## + relabelings: [] + ## @param vminsert.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/drycc/charts/tree/main/drycc/prometheus-operator#prometheus-configuration + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + +## @section VictoriaMetrics Storage Parameters +## +vmstorage: + ## Drycc VictoriaMetrics Storage image + ## ref: https://hub.docker.com/r/drycc/victoriametrics-vmstorage/tags/ + ## @param vmstorage.image.registry [default: REGISTRY_NAME] VictoriaMetrics Storage image registry + ## @param vmstorage.image.repository 
[default: REPOSITORY_NAME/victoriametrics-vmstorage] VictoriaMetrics Storage image repository + ## @skip vmstorage.image.tag VictoriaMetrics Storage image tag (immutable tags are recommended) + ## @param vmstorage.image.digest VictoriaMetrics Storage image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param vmstorage.image.pullPolicy VictoriaMetrics Storage image pull policy + ## @param vmstorage.image.pullSecrets VictoriaMetrics Storage image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/victoriametrics + tag: 0.0.1 + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param vmstorage.replicaCount Number of VictoriaMetrics Storage replicas to deploy + ## + replicaCount: 3 + ## @param vmstorage.containerPorts.http VictoriaMetrics Storage http container port + ## @param vmstorage.containerPorts.vmselect VictoriaMetrics Storage vmselect container port + ## @param vmstorage.containerPorts.vminsert VictoriaMetrics Storage vminsert container port + ## + containerPorts: + http: 8482 + vmselect: 8401 + vminsert: 8400 + ## Configure extra options for VictoriaMetrics Storage containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param vmstorage.livenessProbe.enabled Enable livenessProbe on VictoriaMetrics Storage containers + ## @param vmstorage.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param 
vmstorage.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param vmstorage.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param vmstorage.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param vmstorage.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmstorage.readinessProbe.enabled Enable readinessProbe on VictoriaMetrics Storage containers + ## @param vmstorage.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param vmstorage.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param vmstorage.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param vmstorage.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param vmstorage.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmstorage.startupProbe.enabled Enable startupProbe on VictoriaMetrics Storage containers + ## @param vmstorage.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param vmstorage.startupProbe.periodSeconds Period seconds for startupProbe + ## @param vmstorage.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param vmstorage.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param vmstorage.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmstorage.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## 
@param vmstorage.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param vmstorage.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## VictoriaMetrics Storage resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param vmstorage.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmstorage.resources is set (vmstorage.resources is recommended for production). + ## More information: https://github.com/drycc/charts/blob/main/drycc/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param vmstorage.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param vmstorage.retentionPeriod Data retention period + ## + retentionPeriod: 30 + ## @param vmstorage.statefulsetAnnotations Annotations for VictoriaMetrics Storage statefulset + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + statefulsetAnnotations: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param vmstorage.podSecurityContext.enabled Enabled VictoriaMetrics Storage pods' Security Context + ## @param vmstorage.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param vmstorage.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param vmstorage.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param vmstorage.podSecurityContext.fsGroup Set 
VictoriaMetrics Storage pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param vmstorage.containerSecurityContext.enabled Enabled VictoriaMetrics Storage containers' Security Context + ## @param vmstorage.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param vmstorage.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param vmstorage.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param vmstorage.containerSecurityContext.runAsNonRoot Set VictoriaMetrics Storage containers' Security Context runAsNonRoot + ## @param vmstorage.containerSecurityContext.privileged Set VictoriaMetrics Storage containers' Security Context privileged + ## @param vmstorage.containerSecurityContext.readOnlyRootFilesystem Set VictoriaMetrics Storage containers' Security Context runAsNonRoot + ## @param vmstorage.containerSecurityContext.allowPrivilegeEscalation Set VictoriaMetrics Storage container's privilege escalation + ## @param vmstorage.containerSecurityContext.capabilities.drop Set VictoriaMetrics Storage container's Security Context runAsNonRoot + ## @param vmstorage.containerSecurityContext.seccompProfile.type Set VictoriaMetrics Storage container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + ## @param vmstorage.command Override default container command (useful when using custom images) + ## + command: [] + ## 
@param vmstorage.args Override default container args (useful when using custom images) + ## + args: [] + ## @param vmstorage.extraArgs Add extra arguments to the default command + ## + extraArgs: [] + ## @param vmstorage.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param vmstorage.hostAliases VictoriaMetrics Storage pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param vmstorage.podLabels Extra labels for VictoriaMetrics Storage pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param vmstorage.podAnnotations Annotations for VictoriaMetrics Storage pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param vmstorage.podAffinityPreset Pod affinity preset. Ignored if `vmstorage.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param vmstorage.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `vmstorage.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param vmstorage.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param vmstorage.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param vmstorage.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param vmstorage.autoscaling.vpa.enabled Enable VPA for VictoriaMetrics Storage pods + ## @param vmstorage.autoscaling.vpa.annotations Annotations for VPA resource + ## @param vmstorage.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## @param vmstorage.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## @param vmstorage.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param vmstorage.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updatePolicy: + updateMode: Auto + ## @param vmstorage.autoscaling.hpa.enabled Enable HPA for VictoriaMetrics Storage pods + ## @param vmstorage.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param vmstorage.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param vmstorage.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param vmstorage.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Node vmstorage.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param vmstorage.nodeAffinityPreset.type Node affinity preset type. Ignored if `vmstorage.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param vmstorage.nodeAffinityPreset.key Node label key to match. Ignored if `vmstorage.affinity` is set + ## + key: "" + ## @param vmstorage.nodeAffinityPreset.values Node label values to match. Ignored if `vmstorage.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param vmstorage.affinity Affinity for VictoriaMetrics Storage pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `vmstorage.podAffinityPreset`, `vmstorage.podAntiAffinityPreset`, and `vmstorage.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param vmstorage.nodeSelector Node labels for VictoriaMetrics Storage pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param vmstorage.podManagementPolicy Pod management policy for VictoriaMetrics Storage statefulset + ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: Parallel + ## @param vmstorage.tolerations Tolerations for VictoriaMetrics Storage pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param vmstorage.updateStrategy.type VictoriaMetrics Storage statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param vmstorage.priorityClassName VictoriaMetrics Storage pods' priorityClassName + ## + priorityClassName: "" + ## @param vmstorage.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param vmstorage.schedulerName Name of the k8s scheduler (other than default) for VictoriaMetrics Storage pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param vmstorage.terminationGracePeriodSeconds Seconds VictoriaMetrics Storage pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param vmstorage.lifecycleHooks for the VictoriaMetrics Storage container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param vmstorage.extraEnvVars Array with extra environment variables to add to VictoriaMetrics Storage nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param vmstorage.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for VictoriaMetrics Storage nodes + ## + extraEnvVarsCM: "" + ## @param vmstorage.extraEnvVarsSecret Name of existing Secret containing extra env vars for VictoriaMetrics Storage nodes + ## + extraEnvVarsSecret: "" + ## @param vmstorage.extraVolumes Optionally specify extra list of additional volumes for the VictoriaMetrics Storage pod(s) + ## + extraVolumes: [] + ## @param vmstorage.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Storage container(s) + ## + extraVolumeMounts: [] + ## @param vmstorage.sidecars Add additional sidecar containers to the VictoriaMetrics Storage pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param vmstorage.initContainers Add additional init containers to the
VictoriaMetrics Storage pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## ServiceAccount configuration + ## + serviceAccount: + ## @param vmstorage.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param vmstorage.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param vmstorage.serviceAccount.annotations Additional Service Account annotations (evaluated as a template) + ## + annotations: {} + ## @param vmstorage.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: false + ## @section VictoriaMetrics Storage Traffic Exposure Parameters + ## + service: + ## @param vmstorage.service.type VictoriaMetrics Storage service type + ## + type: ClusterIP + ## @param vmstorage.service.ports.http VictoriaMetrics Storage service http port + ## + ports: + http: "80" + ## Node ports to expose + ## @param vmstorage.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + ## @param vmstorage.service.clusterIP VictoriaMetrics Storage service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param vmstorage.service.loadBalancerIP VictoriaMetrics Storage service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param vmstorage.service.loadBalancerSourceRanges VictoriaMetrics Storage service Load Balancer sources + ## ref: 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param vmstorage.service.externalTrafficPolicy VictoriaMetrics Storage service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param vmstorage.service.annotations [object] Additional custom annotations for VictoriaMetrics Storage service + ## + annotations: {} + ## @param vmstorage.service.extraPorts Extra ports to expose in VictoriaMetrics Storage service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param vmstorage.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param vmstorage.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param vmstorage.service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param vmstorage.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param vmstorage.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. 
When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + ## @param vmstorage.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param vmstorage.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param vmstorage.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param vmstorage.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param vmstorage.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + ## @section VictoriaMetrics Storage Persistence Parameters + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param vmstorage.persistence.enabled Enable persistence using Persistent Volume Claims + ## + enabled: true + ## @param vmstorage.persistence.mountPath Persistent Volume mount root path + ## + mountPath: /opt/drycc/victoriametrics/data + ## @param vmstorage.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing 
the default provisioner + ## + storageClass: "" + ## @param vmstorage.persistence.accessModes [array] Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param vmstorage.persistence.size Persistent Volume size + ## + size: 10Gi + ## @param vmstorage.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param vmstorage.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param vmstorage.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param vmstorage.persistence.existingClaim The name of an existing PVC to use for persistence + ## + existingClaim: "" + ## @param vmstorage.persistence.extraVolumeClaimTemplates Add additional VolumeClaimTemplates for enabling any plugins or any other purpose + ## + extraVolumeClaimTemplates: [] + + ## @section VictoriaMetrics Storage Metrics Parameters + ## + + ## Prometheus metrics + ## + metrics: + ## @param vmstorage.metrics.enabled Enable the export of Prometheus metrics + ## + enabled: true + ## @param vmstorage.metrics.annotations Additional custom annotations for the service + ## + annotations: {} + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param vmstorage.metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) + ## + enabled: false + ## @param vmstorage.metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param vmstorage.metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor + ## + annotations: {} + ## @param vmstorage.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param vmstorage.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus 
+ ## + jobLabel: "" + ## @param vmstorage.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param vmstorage.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param vmstorage.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param vmstorage.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param vmstorage.metrics.serviceMonitor.relabelings Specify general relabeling + ## + relabelings: [] + ## @param vmstorage.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/drycc/charts/tree/main/drycc/prometheus-operator#prometheus-configuration + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + +## @section VictoriaMetrics Auth Parameters +## +vmauth: + ## @param vmauth.enabled Enable VictoriaMetrics Auth + ## + enabled: true + ## Drycc VictoriaMetrics Auth image + ## ref: https://hub.docker.com/r/drycc/victoriametrics-vmauth/tags/ + ## @param vmauth.image.registry [default: REGISTRY_NAME] VictoriaMetrics Auth image registry + ## @param vmauth.image.repository [default: REPOSITORY_NAME/victoriametrics-vmauth] VictoriaMetrics Auth image repository + ## @skip vmauth.image.tag VictoriaMetrics Auth image tag (immutable tags are recommended) + ## @param vmauth.image.digest VictoriaMetrics Auth image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param vmauth.image.pullPolicy VictoriaMetrics Auth image pull policy + ## @param vmauth.image.pullSecrets VictoriaMetrics Auth image pull secrets + ## + user: + username: "admin1" + password: "admin" + image: + registry: registry.drycc.cc + repository: drycc/victoriametrics + tag: 0.0.1 + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param vmauth.replicaCount Number of VictoriaMetrics Auth replicas to deploy + ## + replicaCount: 2 + ## @param vmauth.containerPorts.http VictoriaMetrics Auth http container port + ## + containerPorts: + http: 8427 + ## Configure extra options for VictoriaMetrics Auth containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param vmauth.livenessProbe.enabled Enable livenessProbe on VictoriaMetrics Auth containers + ## @param vmauth.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param vmauth.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param vmauth.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param vmauth.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param vmauth.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmauth.readinessProbe.enabled Enable 
readinessProbe on VictoriaMetrics Auth containers + ## @param vmauth.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param vmauth.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param vmauth.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param vmauth.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param vmauth.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmauth.startupProbe.enabled Enable startupProbe on VictoriaMetrics Auth containers + ## @param vmauth.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param vmauth.startupProbe.periodSeconds Period seconds for startupProbe + ## @param vmauth.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param vmauth.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param vmauth.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmauth.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param vmauth.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param vmauth.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## VictoriaMetrics Auth resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param vmauth.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if vmauth.resources is set (vmauth.resources is recommended for production). + ## More information: https://github.com/drycc/charts/blob/main/drycc/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param vmauth.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param vmauth.kind Define how to deploy VictoriaMetrics Auth (allowed values: deployment or daemonset) + ## + kind: "deployment" + ## @param vmauth.annotations Annotations for VictoriaMetrics Auth Deployment or StatefulSet + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param vmauth.podSecurityContext.enabled Enabled VictoriaMetrics Auth pods' Security Context + ## @param vmauth.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param vmauth.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param vmauth.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param vmauth.podSecurityContext.fsGroup Set VictoriaMetrics Auth pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param vmauth.containerSecurityContext.enabled Enabled VictoriaMetrics Auth containers' Security Context + ## @param vmauth.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container 
+ ## @param vmauth.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param vmauth.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param vmauth.containerSecurityContext.runAsNonRoot Set VictoriaMetrics Auth containers' Security Context runAsNonRoot + ## @param vmauth.containerSecurityContext.privileged Set VictoriaMetrics Auth containers' Security Context privileged + ## @param vmauth.containerSecurityContext.readOnlyRootFilesystem Set VictoriaMetrics Auth containers' Security Context runAsNonRoot + ## @param vmauth.containerSecurityContext.allowPrivilegeEscalation Set VictoriaMetrics Auth container's privilege escalation + ## @param vmauth.containerSecurityContext.capabilities.drop Set VictoriaMetrics Auth container's Security Context runAsNonRoot + ## @param vmauth.containerSecurityContext.seccompProfile.type Set VictoriaMetrics Auth container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + ## @param vmauth.command Override default container command (useful when using custom images) + ## + command: [] + ## @param vmauth.args Override default container args (useful when using custom images) + ## + args: [] + ## @param vmauth.extraArgs Add extra arguments to the default command + ## + extraArgs: [] + ## @param vmauth.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param vmauth.hostAliases VictoriaMetrics Auth pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param vmauth.podLabels Extra labels for VictoriaMetrics Auth pods + ## ref: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param vmauth.podAnnotations Annotations for VictoriaMetrics Auth pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param vmauth.podAffinityPreset Pod affinity preset. Ignored if `vmauth.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param vmauth.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `vmauth.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param vmauth.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param vmauth.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param vmauth.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param vmauth.configOverrides Overwrite or add extra configuration options to the chart default + ## Example: + ## configOverrides: + ## ip_filters: + ## allow_list: ["1.2.3.0/24", "127.0.0.1"] + ## + configOverrides: {} + ## @param vmauth.existingSecret The name of an existing Secret with configuration + ## + existingSecret: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param vmauth.autoscaling.vpa.enabled Enable VPA for VictoriaMetrics Auth pods + ## @param vmauth.autoscaling.vpa.annotations Annotations for VPA resource + ## @param vmauth.autoscaling.vpa.controlledResources VPA List of 
resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## @param vmauth.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## @param vmauth.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param vmauth.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param vmauth.autoscaling.hpa.enabled Enable HPA for VictoriaMetrics Auth pods + ## @param vmauth.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param vmauth.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param vmauth.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param vmauth.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Node vmauth.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param vmauth.nodeAffinityPreset.type Node affinity preset type. Ignored if `vmauth.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param vmauth.nodeAffinityPreset.key Node label key to match. Ignored if `vmauth.affinity` is set + ## + key: "" + ## @param vmauth.nodeAffinityPreset.values Node label values to match. Ignored if `vmauth.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param vmauth.affinity Affinity for VictoriaMetrics Auth pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `vmauth.podAffinityPreset`, `vmauth.podAntiAffinityPreset`, and `vmauth.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param vmauth.nodeSelector Node labels for VictoriaMetrics Auth pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param vmauth.tolerations Tolerations for VictoriaMetrics Auth pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param vmauth.updateStrategy.type VictoriaMetrics Auth statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param vmauth.priorityClassName VictoriaMetrics Auth pods' priorityClassName + ## + priorityClassName: "" + ## @param vmauth.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param vmauth.schedulerName Name of the k8s scheduler (other than default) for VictoriaMetrics Auth pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param vmauth.terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param vmauth.lifecycleHooks for the VictoriaMetrics Auth container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param vmauth.extraEnvVars Array with extra environment variables to add to VictoriaMetrics Auth nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param vmauth.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for VictoriaMetrics Auth nodes + ## + extraEnvVarsCM: "" + ## @param vmauth.extraEnvVarsSecret Name of existing Secret containing extra env vars for VictoriaMetrics Auth nodes + ## + extraEnvVarsSecret: "" + ## @param vmauth.extraVolumes Optionally specify extra list of additional volumes for the VictoriaMetrics Auth pod(s) + ## + extraVolumes: [] + ## @param vmauth.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Auth container(s) + ## + extraVolumeMounts: [] + ## @param vmauth.sidecars Add additional sidecar containers to the VictoriaMetrics Auth pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param vmauth.initContainers Add additional init containers to the VictoriaMetrics Auth pod(s) + ## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## @section VictoriaMetrics Auth RBAC Parameters + ## + + ## ServiceAccount configuration + ## + serviceAccount: + ## @param vmauth.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param vmauth.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param vmauth.serviceAccount.annotations Additional Service Account annotations (evaluated as a template) + ## + annotations: {} + ## @param vmauth.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: false + ## @section VictoriaMetrics Auth Traffic Exposure Parameters + ## + service: + ## @param vmauth.service.type VictoriaMetrics Auth service type + ## + type: LoadBalancer + ## @param vmauth.service.ports.http VictoriaMetrics Auth service http port + ## + ports: + http: "8427" + ## Node ports to expose + ## @param vmauth.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + ## @param vmauth.service.clusterIP VictoriaMetrics Auth service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param vmauth.service.loadBalancerIP VictoriaMetrics Auth service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param vmauth.service.loadBalancerSourceRanges VictoriaMetrics Auth service Load Balancer sources + ## ref: 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param vmauth.service.externalTrafficPolicy VictoriaMetrics Auth service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param vmauth.service.annotations [object] Additional custom annotations for VictoriaMetrics Auth service + ## + annotations: {} + ## @param vmauth.service.extraPorts Extra ports to expose in VictoriaMetrics Auth service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param vmauth.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param vmauth.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## ref: http://kubernetes.io/docs/concepts/services-networking/ingress/ + ## + ingress: + ## @param vmauth.ingress.enabled Enable ingress record generation for VictoriaMetrics Auth + ## + enabled: false + ## @param vmauth.ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param vmauth.ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param vmauth.ingress.hostname Default host for the ingress record + ## + hostname: vmauth.local + ## @param vmauth.ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for 
your cluster . + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param vmauth.ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param vmauth.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param vmauth.ingress.tls Enable TLS configuration for the host defined at `vmauth.ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `vmauth.ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `vmauth.ingress.selfSigned=true` + ## + tls: false + ## @param vmauth.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param vmauth.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: victoriametrics select.local + ## path: / + ## + extraHosts: [] + ## @param vmauth.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param 
vmauth.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - victoriametrics select.local + ## secretName: victoriametrics select.local-tls + ## + extraTls: [] + ## @param vmauth.ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: victoriametrics select.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param vmauth.ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param vmauth.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param vmauth.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. 
When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + ## @param vmauth.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param vmauth.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param vmauth.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param vmauth.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param vmauth.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + allowNamespaces: + - "aaa" + - "bbb" + ## @section VictoriaMetrics Auth Metrics Parameters + ## + ## Prometheus metrics + ## + metrics: + ## @param vmauth.metrics.enabled Enable the export of Prometheus metrics + ## + enabled: true + ## @param vmauth.metrics.annotations Additional custom annotations for the service + ## + annotations: {} + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param vmauth.metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be 
`true`) + ## + enabled: false + ## @param vmauth.metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param vmauth.metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor + ## + annotations: {} + ## @param vmauth.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param vmauth.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + ## @param vmauth.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param vmauth.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param vmauth.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param vmauth.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param vmauth.metrics.serviceMonitor.relabelings Specify general relabeling + ## + relabelings: [] + ## @param vmauth.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/drycc/charts/tree/main/drycc/prometheus-operator#prometheus-configuration + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + +## @section VictoriaMetrics Agent Parameters +## +vmagent: + ## @param vmagent.enabled Enable VictoriaMetrics Agent + ## + enabled: true + ## Drycc VictoriaMetrics Agent image + ## ref: https://hub.docker.com/r/drycc/victoriametrics-vmagent/tags/ + ## @param vmagent.image.registry [default: REGISTRY_NAME] VictoriaMetrics Agent 
image registry + ## @param vmagent.image.repository [default: REPOSITORY_NAME/victoriametrics-vmagent] VictoriaMetrics Agent image repository + ## @skip vmagent.image.tag VictoriaMetrics Agent image tag (immutable tags are recommended) + ## @param vmagent.image.digest VictoriaMetrics Agent image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param vmagent.image.pullPolicy VictoriaMetrics Agent image pull policy + ## @param vmagent.image.pullSecrets VictoriaMetrics Agent image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/victoriametrics + tag: 0.0.1 + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param vmagent.replicaCount Number of VictoriaMetrics Agent replicas to deploy + ## + replicaCount: 1 + ## @param vmagent.containerPorts.http VictoriaMetrics Agent http container port + ## @param vmagent.containerPorts.graphite VictoriaMetrics Agent graphite container port + ## @param vmagent.containerPorts.opentsdb VictoriaMetrics Agent opentsdb container port + ## @param vmagent.containerPorts.influxdb VictoriaMetrics Agent influxdb container port + ## + containerPorts: + http: 8429 + graphite: 2003 + opentsdb: 4242 + influxdb: 8089 + ## Configure extra options for VictoriaMetrics Agent containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param vmagent.livenessProbe.enabled Enable livenessProbe on VictoriaMetrics Agent containers + ## @param 
vmagent.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param vmagent.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param vmagent.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param vmagent.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param vmagent.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmagent.readinessProbe.enabled Enable readinessProbe on VictoriaMetrics Agent containers + ## @param vmagent.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param vmagent.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param vmagent.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param vmagent.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param vmagent.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmagent.startupProbe.enabled Enable startupProbe on VictoriaMetrics Agent containers + ## @param vmagent.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param vmagent.startupProbe.periodSeconds Period seconds for startupProbe + ## @param vmagent.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param vmagent.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param vmagent.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmagent.customLivenessProbe Custom livenessProbe that overrides 
the default one + ## + customLivenessProbe: {} + ## @param vmagent.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param vmagent.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## VictoriaMetrics Agent resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param vmagent.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if vmagent.resources is set (vmagent.resources is recommended for production). + ## More information: https://github.com/drycc/charts/blob/main/drycc/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param vmagent.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param vmagent.kind Define how to deploy VictoriaMetrics Agent (allowed values: deployment or daemonset) + ## + kind: "deployment" + ## @param vmagent.annotations Annotations for VictoriaMetrics Agent Deployment or StatefulSet + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param vmagent.podSecurityContext.enabled Enabled VictoriaMetrics Agent pods' Security Context + ## @param vmagent.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param vmagent.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param vmagent.podSecurityContext.supplementalGroups Set filesystem extra 
groups + ## @param vmagent.podSecurityContext.fsGroup Set VictoriaMetrics Agent pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param vmagent.containerSecurityContext.enabled Enabled VictoriaMetrics Agent containers' Security Context + ## @param vmagent.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param vmagent.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param vmagent.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param vmagent.containerSecurityContext.runAsNonRoot Set VictoriaMetrics Agent containers' Security Context runAsNonRoot + ## @param vmagent.containerSecurityContext.privileged Set VictoriaMetrics Agent containers' Security Context privileged + ## @param vmagent.containerSecurityContext.readOnlyRootFilesystem Set VictoriaMetrics Agent containers' Security Context runAsNonRoot + ## @param vmagent.containerSecurityContext.allowPrivilegeEscalation Set VictoriaMetrics Agent container's privilege escalation + ## @param vmagent.containerSecurityContext.capabilities.drop Set VictoriaMetrics Agent container's Security Context runAsNonRoot + ## @param vmagent.containerSecurityContext.seccompProfile.type Set VictoriaMetrics Agent container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + ## @param vmagent.command Override default container command (useful when using custom images) + ## + 
command: [] + ## @param vmagent.args Override default container args (useful when using custom images) + ## + args: [] + ## @param vmagent.extraArgs Add extra arguments to the default command + ## + extraArgs: [] + ## @param vmagent.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: true + ## @param vmagent.hostAliases VictoriaMetrics Agent pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param vmagent.podLabels Extra labels for VictoriaMetrics Agent pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param vmagent.podAnnotations Annotations for VictoriaMetrics Agent pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param vmagent.podAffinityPreset Pod affinity preset. Ignored if `vmagent.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param vmagent.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `vmagent.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param vmagent.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param vmagent.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param vmagent.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param vmagent.scrapeConfigOverrides Overwrite or add extra scraping configuration options to the chart default + ## Example: + # configOverrides: + # ip_filters: + # allow_list: ["1.2.3.0/24", "127.0.0.1"] + # + scrapeConfigOverrides: {} + extraJobs: [] + ## @param vmagent.enableListeners.influxdb Enable influxdb listener + ## @param vmagent.enableListeners.opentsdb Enable graphite listener + ## @param vmagent.enableListeners.graphite Enable opentsdb listener + enableListeners: + influxdb: false + opentsdb: false + graphite: false + ## @param vmagent.existingScrapeConfigMap The name of an existing ConfigMap with the scrape configuration + ## + existingScrapeConfigMap: "" + ## @param vmagent.namespaced Only scrape in the deployed namespace + ## + namespaced: true + ## @param vmagent.allowedMetricsEndpoints [array] Allowed metrics endpoints to scrape (when not namespaced) + ## + allowedMetricsEndpoints: + - /metrics + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param vmagent.autoscaling.vpa.enabled Enable VPA for VictoriaMetrics Agent pods + ## @param vmagent.autoscaling.vpa.annotations Annotations for VPA resource + ## @param vmagent.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## @param vmagent.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## @param vmagent.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param vmagent.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param vmagent.autoscaling.hpa.enabled Enable HPA for VictoriaMetrics Agent pods + ## @param vmagent.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param vmagent.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param vmagent.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param vmagent.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Node vmagent.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param vmagent.nodeAffinityPreset.type Node affinity preset type. Ignored if `vmagent.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param vmagent.nodeAffinityPreset.key Node label key to match. Ignored if `vmagent.affinity` is set + ## + key: "" + ## @param vmagent.nodeAffinityPreset.values Node label values to match. Ignored if `vmagent.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param vmagent.affinity Affinity for VictoriaMetrics Agent pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `vmagent.podAffinityPreset`, `vmagent.podAntiAffinityPreset`, and `vmagent.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param vmagent.nodeSelector Node labels for VictoriaMetrics Agent pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param vmagent.tolerations Tolerations for VictoriaMetrics Agent pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param vmagent.updateStrategy.type VictoriaMetrics Agent statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param vmagent.priorityClassName VictoriaMetrics Agent pods' priorityClassName + ## + priorityClassName: "" + ## @param vmagent.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param vmagent.schedulerName Name of the k8s scheduler (other than default) for VictoriaMetrics Agent pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param vmagent.terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param vmagent.lifecycleHooks for the VictoriaMetrics Agent container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param vmagent.extraEnvVars Array with extra environment variables to add to VictoriaMetrics Agent nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param vmagent.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for VictoriaMetrics Agent nodes + ## + extraEnvVarsCM: "" + ## @param vmagent.extraEnvVarsSecret Name of existing Secret containing extra env vars for VictoriaMetrics Agent nodes + ## + extraEnvVarsSecret: "" + ## @param vmagent.extraVolumes Optionally specify extra list of additional volumes for the VictoriaMetrics Agent pod(s) + ## + extraVolumes: [] + ## @param vmagent.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Agent container(s) + ## + extraVolumeMounts: [] + ## @param vmagent.sidecars Add additional sidecar containers to the VictoriaMetrics Agent pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param vmagent.initContainers Add additional init containers to the VictoriaMetrics Agent pod(s) + ## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## @section VictoriaMetrics Agent RBAC Parameters + ## + ## RBAC configuration + ## + rbac: + ## @param vmagent.rbac.create Specifies whether RBAC resources should be created + ## + create: true + ## @param vmagent.rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] + + ## ServiceAccount configuration + ## + serviceAccount: + ## @param vmagent.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param vmagent.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param vmagent.serviceAccount.annotations Additional Service Account annotations (evaluated as a template) + ## + annotations: {} + ## @param vmagent.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: false + ## @section VictoriaMetrics Agent Traffic Exposure Parameters + ## + service: + ## @param vmagent.service.type VictoriaMetrics Agent service type + ## + type: ClusterIP + ## @param vmagent.service.ports.http VictoriaMetrics Agent service http port + ## @param vmagent.service.ports.graphite VictoriaMetrics Agent service graphite port + ## @param vmagent.service.ports.opentsdb VictoriaMetrics Agent service opentsdb port + ## @param vmagent.service.ports.influxdb VictoriaMetrics Agent service influxdb port + ## + ports: + http: "8429" + graphite: 2003 + opentsdb: 4242 + influxdb: 8089 + ## Node ports to expose + ## @param vmagent.service.nodePorts.http Node port for HTTP + ## @param 
vmagent.service.nodePorts.graphite Node port for graphite + ## @param vmagent.service.nodePorts.opentsdb Node port for opentsdb + ## @param vmagent.service.nodePorts.influxdb Node port for influxdb + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + graphite: "" + opentsdb: "" + influxdb: "" + ## @param vmagent.service.clusterIP VictoriaMetrics Agent service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param vmagent.service.loadBalancerIP VictoriaMetrics Agent service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param vmagent.service.loadBalancerSourceRanges VictoriaMetrics Agent service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param vmagent.service.externalTrafficPolicy VictoriaMetrics Agent service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param vmagent.service.annotations [object] Additional custom annotations for VictoriaMetrics Agent service + ## + annotations: {} + ## @param vmagent.service.extraPorts Extra ports to expose in VictoriaMetrics Agent service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param vmagent.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param vmagent.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + 
sessionAffinityConfig: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param vmagent.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param vmagent.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + ## @param vmagent.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param vmagent.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param vmagent.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param vmagent.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param vmagent.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + ## @section VictoriaMetrics Agent Metrics Parameters + ## + ## Prometheus metrics + ## + metrics: + ## @param vmagent.metrics.enabled Enable the export of 
Prometheus metrics + ## + enabled: true + ## @param vmagent.metrics.annotations Additional custom annotations for the service + ## + annotations: {} + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param vmagent.metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) + ## + enabled: false + ## @param vmagent.metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param vmagent.metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor + ## + annotations: {} + ## @param vmagent.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param vmagent.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + ## @param vmagent.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param vmagent.metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param vmagent.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param vmagent.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param vmagent.metrics.serviceMonitor.relabelings Specify general relabeling + ## + relabelings: [] + ## @param vmagent.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/drycc/charts/tree/main/drycc/prometheus-operator#prometheus-configuration + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + scrapeInterval: 30s + scrapeTimeout: 30s +## @section VictoriaMetrics Alert Parameters +## +vmalert: + ## @param vmalert.enabled Enable VictoriaMetrics Alert + ## + enabled: false + ## Drycc VictoriaMetrics Alert image + ## ref: https://hub.docker.com/r/drycc/victoriametrics-vmalert/tags/ + ## @param vmalert.image.registry [default: REGISTRY_NAME] VictoriaMetrics Alert image registry + ## @param vmalert.image.repository [default: REPOSITORY_NAME/victoriametrics-vmalert] VictoriaMetrics Alert image repository + ## @skip vmalert.image.tag VictoriaMetrics Alert image tag (immutable tags are recommended) + ## @param vmalert.image.digest VictoriaMetrics Alert image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param vmalert.image.pullPolicy VictoriaMetrics Alert image pull policy + ## @param vmalert.image.pullSecrets VictoriaMetrics Alert image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/victoriametrics + tag: 0.0.1 + digest: "" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param vmalert.replicaCount Number of VictoriaMetrics Alert replicas to deploy + ## + replicaCount: 1 + ## @param vmalert.containerPorts.http VictoriaMetrics Alert http container port + ## + containerPorts: + http: 8429 + ## Configure extra options for VictoriaMetrics Alert containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param vmalert.livenessProbe.enabled Enable livenessProbe on VictoriaMetrics Alert containers + ## @param vmalert.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param vmalert.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param vmalert.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param vmalert.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param vmalert.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmalert.readinessProbe.enabled Enable readinessProbe on 
VictoriaMetrics Alert containers + ## @param vmalert.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param vmalert.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param vmalert.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param vmalert.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param vmalert.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmalert.startupProbe.enabled Enable startupProbe on VictoriaMetrics Alert containers + ## @param vmalert.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param vmalert.startupProbe.periodSeconds Period seconds for startupProbe + ## @param vmalert.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param vmalert.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param vmalert.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param vmalert.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param vmalert.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param vmalert.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## VictoriaMetrics Alert resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param vmalert.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if vmalert.resources is set (vmalert.resources is recommended for production). + ## More information: https://github.com/drycc/charts/blob/main/drycc/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param vmalert.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param vmalert.datasourceUrl URL to an external datasource (uses VictoriaMetrics Insert if not set) + ## + datasourceUrl: "" + ## @param vmalert.notifierUrl URL to a notifier like AlertManager (necessary when alert rules are set) + ## + notifierUrl: "" + ## @param vmalert.deploymentAnnotations Annotations for VictoriaMetrics Alert Deployment + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + deploymentAnnotations: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param vmalert.podSecurityContext.enabled Enabled VictoriaMetrics Alert pods' Security Context + ## @param vmalert.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param vmalert.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param vmalert.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param vmalert.podSecurityContext.fsGroup Set VictoriaMetrics Alert pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param vmalert.containerSecurityContext.enabled Enabled VictoriaMetrics 
Alert containers' Security Context + ## @param vmalert.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param vmalert.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param vmalert.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param vmalert.containerSecurityContext.runAsNonRoot Set VictoriaMetrics Alert containers' Security Context runAsNonRoot + ## @param vmalert.containerSecurityContext.privileged Set VictoriaMetrics Alert containers' Security Context privileged + ## @param vmalert.containerSecurityContext.readOnlyRootFilesystem Set VictoriaMetrics Alert containers' Security Context runAsNonRoot + ## @param vmalert.containerSecurityContext.allowPrivilegeEscalation Set VictoriaMetrics Alert container's privilege escalation + ## @param vmalert.containerSecurityContext.capabilities.drop Set VictoriaMetrics Alert container's Security Context runAsNonRoot + ## @param vmalert.containerSecurityContext.seccompProfile.type Set VictoriaMetrics Alert container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + ## @param vmalert.command Override default container command (useful when using custom images) + ## + command: [] + ## @param vmalert.args Override default container args (useful when using custom images) + ## + args: [] + ## @param vmalert.extraArgs Add extra arguments to the default command + ## + extraArgs: [] + ## @param vmalert.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: true + ## @param vmalert.hostAliases VictoriaMetrics Alert pods host aliases + ## 
https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param vmalert.podLabels Extra labels for VictoriaMetrics Alert pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param vmalert.podAnnotations Annotations for VictoriaMetrics Alert pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param vmalert.podAffinityPreset Pod affinity preset. Ignored if `vmalert.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param vmalert.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `vmalert.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param vmalert.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param vmalert.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param vmalert.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param vmalert.rulesConfigOverrides Overwrite or add extra rules configuration options to the chart default + ## Example: + ## rulesonfigOverrides: + ## groups: + ## - name: AlertGroupName + ## rules: + ## - alert: AlertName + ## expr: any_metric > 100 + ## for: 30s + ## labels: + ## alertname: 'Any metric is too high' + ## severity: 'warning' + ## + rulesConfigOverrides: {} + ## @param vmalert.existingRulesConfigMap The name of an existing ConfigMap with 
the rules configuration + ## + existingRulesConfigMap: "" + ## Autoscaling configuration + ## ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/ + ## + autoscaling: + ## @param vmalert.autoscaling.vpa.enabled Enable VPA for VictoriaMetrics Alert pods + ## @param vmalert.autoscaling.vpa.annotations Annotations for VPA resource + ## @param vmalert.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## @param vmalert.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## @param vmalert.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## + vpa: + enabled: false + annotations: {} + controlledResources: [] + maxAllowed: {} + minAllowed: {} + ## @param vmalert.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy + ## Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updatePolicy: + updateMode: Auto + ## @param vmalert.autoscaling.hpa.enabled Enable HPA for VictoriaMetrics Alert pods + ## @param vmalert.autoscaling.hpa.minReplicas Minimum number of replicas + ## @param vmalert.autoscaling.hpa.maxReplicas Maximum number of replicas + ## @param vmalert.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## @param vmalert.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + hpa: + enabled: false + minReplicas: "" + maxReplicas: "" + targetCPU: "" + targetMemory: "" + ## Node vmalert.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param vmalert.nodeAffinityPreset.type Node affinity preset type. Ignored if `vmalert.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param vmalert.nodeAffinityPreset.key Node label key to match. 
Ignored if `vmalert.affinity` is set + ## + key: "" + ## @param vmalert.nodeAffinityPreset.values Node label values to match. Ignored if `vmalert.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param vmalert.affinity Affinity for VictoriaMetrics Alert pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `vmalert.podAffinityPreset`, `vmalert.podAntiAffinityPreset`, and `vmalert.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param vmalert.nodeSelector Node labels for VictoriaMetrics Alert pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param vmalert.tolerations Tolerations for VictoriaMetrics Alert pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param vmalert.updateStrategy.type VictoriaMetrics Alert statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param vmalert.priorityClassName VictoriaMetrics Alert pods' priorityClassName + ## + priorityClassName: "" + ## @param vmalert.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param vmalert.schedulerName Name of the k8s scheduler (other than default) for VictoriaMetrics Alert pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param vmalert.terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param vmalert.lifecycleHooks for the VictoriaMetrics Alert container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param vmalert.extraEnvVars Array with extra environment variables to add to VictoriaMetrics Alert nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param vmalert.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for VictoriaMetrics Alert nodes + ## + extraEnvVarsCM: "" + ## @param vmalert.extraEnvVarsSecret Name of existing Secret containing extra env vars for VictoriaMetrics Alert nodes + ## + extraEnvVarsSecret: "" + ## @param vmalert.extraVolumes Optionally specify extra list of additional volumes for the VictoriaMetrics Alert pod(s) + ## + extraVolumes: [] + ## @param vmalert.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the VictoriaMetrics Alert container(s) + ## + extraVolumeMounts: [] + ## @param vmalert.sidecars Add additional sidecar containers to the VictoriaMetrics Alert pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param vmalert.initContainers Add additional init containers to the VictoriaMetrics Alert pod(s) + ## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + + ## ServiceAccount configuration + ## + serviceAccount: + ## @param vmalert.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param vmalert.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param vmalert.serviceAccount.annotations Additional Service Account annotations (evaluated as a template) + ## + annotations: {} + ## @param vmalert.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: false + ## @section VictoriaMetrics Alert Traffic Exposure Parameters + ## + service: + ## @param vmalert.service.type VictoriaMetrics Alert service type + ## + type: ClusterIP + ## @param vmalert.service.ports.http VictoriaMetrics Alert service http port + ## + ports: + http: "8429" + ## Node ports to expose + ## @param vmalert.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + ## @param vmalert.service.clusterIP VictoriaMetrics Alert service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param vmalert.service.loadBalancerIP VictoriaMetrics Alert service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param vmalert.service.loadBalancerSourceRanges VictoriaMetrics Alert service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: 
+ ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param vmalert.service.externalTrafficPolicy VictoriaMetrics Alert service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param vmalert.service.annotations [object] Additional custom annotations for VictoriaMetrics Alert service + ## + annotations: {} + ## @param vmalert.service.extraPorts Extra ports to expose in VictoriaMetrics Alert service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param vmalert.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param vmalert.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param vmalert.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param vmalert.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param vmalert.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param vmalert.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param vmalert.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param vmalert.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param vmalert.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + ## @section VictoriaMetrics Alert Metrics Parameters + ## + ## Prometheus metrics + ## + metrics: + ## @param vmalert.metrics.enabled Enable the export of Prometheus metrics + ## + enabled: false + ## @param vmalert.metrics.annotations Additional custom annotations for the service + ## + annotations: {} + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param vmalert.metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) + ## + enabled: false + ## @param vmalert.metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param vmalert.metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor + ## + annotations: {} + ## @param vmalert.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param 
vmalert.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + ## @param vmalert.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param vmalert.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param vmalert.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param vmalert.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param vmalert.metrics.serviceMonitor.relabelings Specify general relabeling + ## + relabelings: [] + ## @param vmalert.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/drycc/charts/tree/main/drycc/prometheus-operator#prometheus-configuration + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + +## @section Default Init Containers Parameters +## +defaultInitContainers: + ## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node + ## + volumePermissions: + ## @param defaultInitContainers.volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/drycc/os-shell/tags/ + ## @param defaultInitContainers.volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param defaultInitContainers.volumePermissions.image.repository [default: 
REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip defaultInitContainers.volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param defaultInitContainers.volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param defaultInitContainers.volumePermissions.image.pullPolicy OS Shell + Utility image pull policy + ## @param defaultInitContainers.volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/victoriametrics + tag: 0.0.1 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
+ ## More information: https://github.com/drycc/charts/blob/main/drycc/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defaultInitContainers.volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.enabled Enable securityContext in the init container + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 0 diff --git a/addons/victoriametrics/1/demo.yaml b/addons/victoriametrics/1/demo.yaml new file mode 100644 index 00000000..0ad18c8d --- /dev/null +++ b/addons/victoriametrics/1/demo.yaml @@ -0,0 +1,17 @@ +vmauth: + user: + username: admin + password: admin + networkPolicy: + allowNamespaces: + - "ns1" + - "ns2" + service: + type: LoadBalancer + +vmagent: + scrapeInterval: 60s + scrapeTimeout: 10s + +vmstorage: + retentionPeriod: 30d \ No newline at end of file diff --git a/addons/victoriametrics/1/meta.yaml b/addons/victoriametrics/1/meta.yaml new file mode 100644 index 00000000..377b83d7 --- /dev/null +++ b/addons/victoriametrics/1/meta.yaml @@ -0,0 +1,45 @@ +name: victoriametrics +version: "1.0" +id: 6ee5a947-61b6-47ce-b27d-fe7a5246ea3b +description: "victoriametrics" +displayName: "victoriametrics" +metadata: + displayName: "victoriametrics" + provider: 
+ name: drycc + supportURL: https://victoriametrics.com/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/victoriametrics +tags: victoriametrics +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "vmauth.user.username" + required: false + description: "username for vmauth" +- name: "vmauth.user.password" + required: false + description: "password for vmauth" +- name: "vmauth.networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces" +- name: "vmauth.service.type" + required: false + description: "service type: ClusterIP or LoadBalancer" +- name: "vmagent.scrapeInterval" + required: false + description: "scrape interval for vmagent" +- name: "vmagent.scrapeTimeout" + required: false + description: "scrape timeout for vmagent" +- name: "vmagent.scrapeConfigOverrides" + required: false + description: "scrape config overrides for vmagent" +- name: "vmagent.extraJobs" + required: false + description: "extra jobs for vmagent" +- name: "vmstorage.retentionPeriod" + required: false + description: "retention period for vmstorage" +archive: false diff --git a/addons/victoriametrics/1/plans/standard-16c32g500/bind.yaml b/addons/victoriametrics/1/plans/standard-16c32g500/bind.yaml new file mode 100644 index 00000000..2889e803 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-16c32g500/bind.yaml @@ -0,0 +1,24 @@ +credential: + - name: {{ printf "DOMAIN" }} + value: {{ printf "%s" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} +{{- if (eq .Values.vmauth.service.type "LoadBalancer") }} + - name: EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . 
}} + jsonpath: '{ .spec.clusterIP }' + - name: PORT + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: ' { .spec.ports[?(@.name=="http")].port }' + - name: USER + value: {{ .Values.vmauth.user.username }} + - name: PASSWORD + value: {{ .Values.vmauth.user.password }} diff --git a/addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/victoriametrics/1/plans/standard-16c32g500/meta.yaml b/addons/victoriametrics/1/plans/standard-16c32g500/meta.yaml new file mode 100644 index 00000000..9278d834 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-16c32g500/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c32g500" +id: 99b0550c-4fd6-11f0-98ea-f3c453bc45ed +description: "VictoriaMetrics Standard Plan with 16 CPU, 32GB Memory" +displayName: "standard-16c32g500" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/victoriametrics/1/plans/standard-16c32g500/values.yaml b/addons/victoriametrics/1/plans/standard-16c32g500/values.yaml new file mode 100644 index 00000000..7ebeff88 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-16c32g500/values.yaml @@ -0,0 +1,44 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "hb-vmcluster-standard-16c32g500" +vmauth: + resources: + requests: + cpu: 1000m + memory: 1024Mi + limits: + cpu: 16000m + memory: 32Gi + replicaCount: 2 + +vmselect: + 
resources: + requests: + cpu: 1000m + memory: 1024Mi + limits: + cpu: 16000m + memory: 32Gi + replicaCount: 2 + +vminsert: + resources: + requests: + cpu: 1000m + memory: 1024Mi + limits: + cpu: 16000m + memory: 32Gi + replicaCount: 2 + +vmstorage: + resources: + requests: + cpu: 1000m + memory: 1024Mi + limits: + cpu: 16000m + memory: 32Gi + replicaCount: 3 + persistence: + size: 500Gi diff --git a/addons/victoriametrics/1/plans/standard-1c1g10/bind.yaml b/addons/victoriametrics/1/plans/standard-1c1g10/bind.yaml new file mode 100644 index 00000000..2889e803 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-1c1g10/bind.yaml @@ -0,0 +1,24 @@ +credential: + - name: {{ printf "DOMAIN" }} + value: {{ printf "%s" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} +{{- if (eq .Values.vmauth.service.type "LoadBalancer") }} + - name: EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: PORT + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . 
}} + jsonpath: ' { .spec.ports[?(@.name=="http")].port }' + - name: USER + value: {{ .Values.vmauth.user.username }} + - name: PASSWORD + value: {{ .Values.vmauth.user.password }} diff --git a/addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/victoriametrics/1/plans/standard-1c1g10/meta.yaml b/addons/victoriametrics/1/plans/standard-1c1g10/meta.yaml new file mode 100644 index 00000000..fe439e08 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-1c1g10/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c1g10" +id: a69619c6-4ff6-11f0-b5d5-3b23b0dd4860 +description: "VictoriaMetrics Standard Plan with 1 CPU, 1GB Memory" +displayName: "standard-1c1g10" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/victoriametrics/1/plans/standard-1c1g10/values.yaml b/addons/victoriametrics/1/plans/standard-1c1g10/values.yaml new file mode 100644 index 00000000..5815cabe --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-1c1g10/values.yaml @@ -0,0 +1,44 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "hb-vmcluster-standard-1c1g10" +vmauth: + resources: + requests: + cpu: 100m + memory: 16Mi + limits: + cpu: 500m + memory: 512Mi + replicaCount: 2 + +vmselect: + resources: + requests: + cpu: 100m + memory: 32Mi + limits: + cpu: 500m + memory: 512Mi + replicaCount: 2 + +vminsert: + resources: + requests: + cpu: 100m + memory: 32Mi + limits: 
+ cpu: 500m + memory: 512Mi + replicaCount: 2 + +vmstorage: + resources: + requests: + cpu: 100m + memory: 32Mi + limits: + cpu: 500m + memory: 512Mi + replicaCount: 3 + persistence: + size: 10Gi diff --git a/addons/victoriametrics/1/plans/standard-2c4g50/bind.yaml b/addons/victoriametrics/1/plans/standard-2c4g50/bind.yaml new file mode 100644 index 00000000..2889e803 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-2c4g50/bind.yaml @@ -0,0 +1,24 @@ +credential: + - name: {{ printf "DOMAIN" }} + value: {{ printf "%s" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} +{{- if (eq .Values.vmauth.service.type "LoadBalancer") }} + - name: EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: PORT + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . 
}} + jsonpath: ' { .spec.ports[?(@.name=="http")].port }' + - name: USER + value: {{ .Values.vmauth.user.username }} + - name: PASSWORD + value: {{ .Values.vmauth.user.password }} diff --git a/addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/victoriametrics/1/plans/standard-2c4g50/meta.yaml b/addons/victoriametrics/1/plans/standard-2c4g50/meta.yaml new file mode 100644 index 00000000..c964810e --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-2c4g50/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g50" +id: ba1c92c3-03a1-4825-ad74-02490a0e0447 +description: "VictoriaMetrics Standard Plan with 2 CPU, 4GB Memory" +displayName: "standard-2c4g50" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/victoriametrics/1/plans/standard-2c4g50/values.yaml b/addons/victoriametrics/1/plans/standard-2c4g50/values.yaml new file mode 100644 index 00000000..646122f8 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-2c4g50/values.yaml @@ -0,0 +1,44 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "hb-vmcluster-standard-2c4g50" +vmauth: + resources: + requests: + cpu: 100m + memory: 16Mi + limits: + cpu: 2000m + memory: 4096Mi + replicaCount: 2 + +vmselect: + resources: + requests: + cpu: 100m + memory: 32Mi + limits: + cpu: 2000m + memory: 4096Mi + replicaCount: 2 + +vminsert: + resources: + requests: + cpu: 100m + memory: 32Mi + 
limits: + cpu: 2000m + memory: 4096Mi + replicaCount: 2 + +vmstorage: + resources: + requests: + cpu: 100m + memory: 32Mi + limits: + cpu: 2000m + memory: 4096Mi + replicaCount: 3 + persistence: + size: 50Gi diff --git a/addons/victoriametrics/1/plans/standard-4c8g100/bind.yaml b/addons/victoriametrics/1/plans/standard-4c8g100/bind.yaml new file mode 100644 index 00000000..2889e803 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-4c8g100/bind.yaml @@ -0,0 +1,24 @@ +credential: + - name: {{ printf "DOMAIN" }} + value: {{ printf "%s" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} +{{- if (eq .Values.vmauth.service.type "LoadBalancer") }} + - name: EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: PORT + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . 
}} + jsonpath: ' { .spec.ports[?(@.name=="http")].port }' + - name: USER + value: {{ .Values.vmauth.user.username }} + - name: PASSWORD + value: {{ .Values.vmauth.user.password }} diff --git a/addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/victoriametrics/1/plans/standard-4c8g100/meta.yaml b/addons/victoriametrics/1/plans/standard-4c8g100/meta.yaml new file mode 100644 index 00000000..f8d06865 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-4c8g100/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g100" +id: 4ed63e32-4fd3-11f0-934c-ff0f8d3bcb1d +description: "VictoriaMetrics Standard Plan with 4 CPU, 8GB Memory" +displayName: "standard-4c8g100" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/victoriametrics/1/plans/standard-4c8g100/values.yaml b/addons/victoriametrics/1/plans/standard-4c8g100/values.yaml new file mode 100644 index 00000000..48f33c01 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-4c8g100/values.yaml @@ -0,0 +1,44 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "hb-vmcluster-standard-4c8g100" +vmauth: + resources: + requests: + cpu: 1000m + memory: 512Mi + limits: + cpu: 4000m + memory: 8Gi + replicaCount: 2 + +vmselect: + resources: + requests: + cpu: 1000m + memory: 512Mi + limits: + cpu: 4000m + memory: 8Gi + replicaCount: 2 + +vminsert: + resources: + requests: + cpu: 1000m + memory: 
512Mi + limits: + cpu: 4000m + memory: 8Gi + replicaCount: 2 + +vmstorage: + resources: + requests: + cpu: 1000m + memory: 512Mi + limits: + cpu: 4000m + memory: 8Gi + replicaCount: 3 + persistence: + size: 100Gi diff --git a/addons/victoriametrics/1/plans/standard-8c16g200/bind.yaml b/addons/victoriametrics/1/plans/standard-8c16g200/bind.yaml new file mode 100644 index 00000000..2889e803 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-8c16g200/bind.yaml @@ -0,0 +1,24 @@ +credential: + - name: {{ printf "DOMAIN" }} + value: {{ printf "%s" (include "common.names.fullname" .) }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} +{{- if (eq .Values.vmauth.service.type "LoadBalancer") }} + - name: EXTRANET_HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: HOST + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + - name: PORT + valueFrom: + serviceRef: + name: {{ template "victoriametrics.vmauth.fullname" . 
}} + jsonpath: ' { .spec.ports[?(@.name=="http")].port }' + - name: USER + value: {{ .Values.vmauth.user.username }} + - name: PASSWORD + value: {{ .Values.vmauth.user.password }} diff --git a/addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/victoriametrics/1/plans/standard-8c16g200/meta.yaml b/addons/victoriametrics/1/plans/standard-8c16g200/meta.yaml new file mode 100644 index 00000000..b4f8b520 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-8c16g200/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c16g200" +id: 8e34beb6-4fd6-11f0-91bf-fb3650f4e494 +description: "VictoriaMetrics Standard Plan with 8 CPU, 16GB Memory" +displayName: "standard-8c16g200" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/victoriametrics/1/plans/standard-8c16g200/values.yaml b/addons/victoriametrics/1/plans/standard-8c16g200/values.yaml new file mode 100644 index 00000000..97fb68a1 --- /dev/null +++ b/addons/victoriametrics/1/plans/standard-8c16g200/values.yaml @@ -0,0 +1,44 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "hb-vmcluster-standard-8c16g200" +vmauth: + resources: + requests: + cpu: 1000m + memory: 512Mi + limits: + cpu: 8000m + memory: 16Gi + replicaCount: 2 + +vmselect: + resources: + requests: + cpu: 1000m + memory: 512Mi + limits: + cpu: 8000m + memory: 16Gi + replicaCount: 2 + +vminsert: + resources: + requests: + cpu: 
1000m + memory: 512Mi + limits: + cpu: 8000m + memory: 16Gi + replicaCount: 2 + +vmstorage: + resources: + requests: + cpu: 1000m + memory: 512Mi + limits: + cpu: 8000m + memory: 16Gi + replicaCount: 3 + persistence: + size: 200Gi From ea690c79fa5e64685f449e33aafbaca183e3efc1 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 26 Jun 2025 13:40:21 +0800 Subject: [PATCH 63/93] chore(victoriametrics): change images --- .../1/chart/victoriametrics/values.yaml | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/addons/victoriametrics/1/chart/victoriametrics/values.yaml b/addons/victoriametrics/1/chart/victoriametrics/values.yaml index 225c4b3c..5e0836af 100644 --- a/addons/victoriametrics/1/chart/victoriametrics/values.yaml +++ b/addons/victoriametrics/1/chart/victoriametrics/values.yaml @@ -90,8 +90,8 @@ vmselect: ## image: registry: registry.drycc.cc - repository: drycc/victoriametrics - tag: 0.0.1 + repository: drycc-addons/victoriametrics + tag: 1 digest: "" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images @@ -713,8 +713,8 @@ vminsert: ## image: registry: registry.drycc.cc - repository: drycc/victoriametrics - tag: 0.0.1 + repository: drycc-addons/victoriametrics + tag: 1 digest: "" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images @@ -1321,8 +1321,8 @@ vmstorage: ## image: registry: registry.drycc.cc - repository: drycc/victoriametrics - tag: 0.0.1 + repository: drycc-addons/victoriametrics + tag: 1 digest: "" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images @@ -1887,12 +1887,13 @@ vmauth: ## @param vmauth.image.pullSecrets VictoriaMetrics Auth image pull secrets ## user: - username: "admin1" + username: "admin" password: "admin" + image: registry: registry.drycc.cc - repository: drycc/victoriametrics - tag: 0.0.1 + repository: 
drycc-addons/victoriametrics + tag: 1 digest: "" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images @@ -2519,8 +2520,8 @@ vmagent: ## image: registry: registry.drycc.cc - repository: drycc/victoriametrics - tag: 0.0.1 + repository: drycc-addons/victoriametrics + tag: 1 digest: "" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images @@ -3094,8 +3095,8 @@ vmalert: ## image: registry: registry.drycc.cc - repository: drycc/victoriametrics - tag: 0.0.1 + repository: drycc-addons/victoriametrics + tag: 1 digest: "" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images @@ -3628,8 +3629,8 @@ defaultInitContainers: ## image: registry: registry.drycc.cc - repository: drycc/victoriametrics - tag: 0.0.1 + repository: drycc-addons/victoriametrics + tag: 1 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
From 57e08701dfdb232fb7e5d17e6670eb577bf45d06 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 27 Jun 2025 17:08:17 +0800 Subject: [PATCH 64/93] choore(addons): modify plans requests --- .../2/plans/standard-16c48g2w/values.yaml | 12 +++++----- .../2/plans/standard-1c2g2w/values.yaml | 24 +++++++++---------- .../2/plans/standard-24c64g7w/values.yaml | 12 +++++----- .../2/plans/standard-2c4g2w/values.yaml | 18 +++++++------- .../2/plans/standard-4c16g2w/values.yaml | 12 +++++----- .../2/plans/standard-4c8g2w/values.yaml | 18 +++++++------- .../2/plans/standard-8c32g2w/values.yaml | 12 +++++----- .../1.17/plans/standard-2c4g5w/values.yaml | 8 +++---- .../1.17/plans/standard-4c8g5w/values.yaml | 8 +++---- .../3.6/plans/standard-16c32g3w/values.yaml | 4 ++-- .../3.6/plans/standard-1c2g3w/values.yaml | 6 ++--- .../3.6/plans/standard-24c64g3w/values.yaml | 4 ++-- .../3.6/plans/standard-2c4g3w/values.yaml | 6 ++--- .../3.6/plans/standard-4c8g3w/values.yaml | 4 ++-- .../3.6/plans/standard-8c16g3w/values.yaml | 4 ++-- .../plans/standard-16c32g1024/values.yaml | 16 ++++++------- .../2.10/plans/standard-1c2g64/values.yaml | 16 ++++++------- .../2.10/plans/standard-2c4g128/values.yaml | 16 ++++++------- .../2.10/plans/standard-4c8g256/values.yaml | 16 ++++++------- .../2.10/plans/standard-8c16g512/values.yaml | 16 ++++++------- .../2.8/plans/standard-16c32g1024/values.yaml | 16 ++++++------- .../2.8/plans/standard-1c2g64/values.yaml | 16 ++++++------- .../2.8/plans/standard-2c4g128/values.yaml | 16 ++++++------- .../2.8/plans/standard-4c8g256/values.yaml | 16 ++++++------- .../2.8/plans/standard-8c16g512/values.yaml | 16 ++++++------- .../2023/plans/standard-v4s1024/values.yaml | 4 ++-- .../2023/plans/standard-v4s128/values.yaml | 4 ++-- .../2023/plans/standard-v4s2048/values.yaml | 4 ++-- .../2023/plans/standard-v4s256/values.yaml | 4 ++-- .../2023/plans/standard-v4s3096/values.yaml | 4 ++-- .../2023/plans/standard-v4s32/values.yaml | 4 ++-- 
.../2023/plans/standard-v4s512/values.yaml | 4 ++-- .../2023/plans/standard-v4s64/values.yaml | 4 ++-- .../2023/plans/standard-v6d4s1T/values.yaml | 4 ++-- .../2023/plans/standard-v8d4s1T/values.yaml | 4 ++-- .../2023/plans/standard-v8d4s2T/values.yaml | 4 ++-- .../2023/plans/standard-v8d4s3T/values.yaml | 4 ++-- .../2.10/plans/standard-2c4g32/values.yaml | 8 +++---- .../2.10/plans/standard-2c4g64/values.yaml | 8 +++---- .../2.10/plans/standard-4c16g256/values.yaml | 8 +++---- .../2.10/plans/standard-4c16g512/values.yaml | 6 ++--- .../2.10/plans/standard-4c8g128/values.yaml | 8 +++---- .../2.10/plans/standard-8c32g1024/values.yaml | 8 +++---- .../2.10/plans/standard-8c32g2048/values.yaml | 8 +++---- .../2.10/plans/standard-8c32g768/values.yaml | 8 +++---- .../3.0/plans/standard-2c4g32/values.yaml | 8 +++---- .../3.0/plans/standard-2c4g64/values.yaml | 8 +++---- .../3.0/plans/standard-4c16g256/values.yaml | 8 +++---- .../3.0/plans/standard-4c16g512/values.yaml | 8 +++---- .../3.0/plans/standard-4c8g128/values.yaml | 8 +++---- .../3.0/plans/standard-8c32g1024/values.yaml | 12 +++++----- .../3.0/plans/standard-8c32g2048/values.yaml | 12 +++++----- .../3.0/plans/standard-8c32g768/values.yaml | 8 +++---- .../3.12/plans/standard-16c32g3w/values.yaml | 4 ++-- .../3.12/plans/standard-2c4g3w/values.yaml | 4 ++-- .../3.12/plans/standard-4c8g3w/values.yaml | 4 ++-- .../3.12/plans/standard-8c16g3w/values.yaml | 4 ++-- .../4.0/plans/standard-16c32g3w/values.yaml | 4 ++-- .../4.0/plans/standard-2c4g3w/values.yaml | 4 ++-- .../4.0/plans/standard-4c8g3w/values.yaml | 4 ++-- .../4.0/plans/standard-8c16g3w/values.yaml | 4 ++-- .../7.0/plans/standard-16384/values.yaml | 8 +++---- .../7.0/plans/standard-2048/values.yaml | 8 +++---- .../7.0/plans/standard-32768/values.yaml | 10 ++++---- .../7.0/plans/standard-4096/values.yaml | 6 ++--- .../7.0/plans/standard-65536/values.yaml | 10 ++++---- .../7.0/plans/standard-8192/values.yaml | 8 +++---- .../redis/7.0/plans/standard-1024/values.yaml 
| 2 +- .../7.0/plans/standard-131072/values.yaml | 4 ++-- .../7.0/plans/standard-16384/values.yaml | 4 ++-- .../redis/7.0/plans/standard-2048/values.yaml | 4 ++-- .../7.0/plans/standard-32768/values.yaml | 4 ++-- .../redis/7.0/plans/standard-4096/values.yaml | 4 ++-- .../7.0/plans/standard-65536/values.yaml | 4 ++-- .../redis/7.0/plans/standard-8192/values.yaml | 4 ++-- .../3.4/plans/standard-16c32g5w/values.yaml | 10 ++++---- .../3.4/plans/standard-16c64g5w/values.yaml | 8 +++---- .../3.4/plans/standard-1c2g2w/values.yaml | 8 +++---- .../3.4/plans/standard-24c48g5w/values.yaml | 10 ++++---- .../3.4/plans/standard-2c4g2w/values.yaml | 10 ++++---- .../3.4/plans/standard-4c8g3w/values.yaml | 10 ++++---- .../3.4/plans/standard-8c16g3w/values.yaml | 10 ++++---- .../3.9/plans/standard-16c32g3w/values.yaml | 4 ++-- .../3.9/plans/standard-1c2g3w/values.yaml | 2 +- .../3.9/plans/standard-2c4g3w/values.yaml | 4 ++-- .../3.9/plans/standard-2c4g5w/values.yaml | 4 ++-- .../3.9/plans/standard-4c8g3w/values.yaml | 4 ++-- .../3.9/plans/standard-8c16g3w/values.yaml | 4 ++-- 88 files changed, 347 insertions(+), 347 deletions(-) diff --git a/addons/airflow/2/plans/standard-16c48g2w/values.yaml b/addons/airflow/2/plans/standard-16c48g2w/values.yaml index 49f5eb54..004364c2 100644 --- a/addons/airflow/2/plans/standard-16c48g2w/values.yaml +++ b/addons/airflow/2/plans/standard-16c48g2w/values.yaml @@ -18,8 +18,8 @@ web: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow scheduler parameters @@ -37,8 +37,8 @@ scheduler: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow worker parameters @@ -56,5 +56,5 @@ worker: cpu: 16 memory: 48Gi requests: - cpu: 4 - memory: 24Gi + cpu: 2 + memory: 6Gi diff --git a/addons/airflow/2/plans/standard-1c2g2w/values.yaml b/addons/airflow/2/plans/standard-1c2g2w/values.yaml index caeaba82..db0d3526 100644 --- a/addons/airflow/2/plans/standard-1c2g2w/values.yaml +++ 
b/addons/airflow/2/plans/standard-1c2g2w/values.yaml @@ -15,11 +15,11 @@ web: ## resources: limits: - cpu: 1000m - memory: 2048Mi + cpu: 1 + memory: 2Gi requests: - cpu: 500m - memory: 1024Mi + cpu: 200m + memory: 256Mi ## @section Airflow scheduler parameters @@ -34,11 +34,11 @@ scheduler: ## resources: limits: - cpu: 1000m - memory: 2048Mi + cpu: 1 + memory: 2Gi requests: - cpu: 500m - memory: 1024Mi + cpu: 200m + memory: 256Mi ## @section Airflow worker parameters @@ -53,8 +53,8 @@ worker: ## resources: limits: - cpu: 1000m - memory: 2048Mi + cpu: 1 + memory: 2Gi requests: - cpu: 500m - memory: 1024Mi + cpu: 200m + memory: 256Mi diff --git a/addons/airflow/2/plans/standard-24c64g7w/values.yaml b/addons/airflow/2/plans/standard-24c64g7w/values.yaml index 564cc849..4a34db1b 100644 --- a/addons/airflow/2/plans/standard-24c64g7w/values.yaml +++ b/addons/airflow/2/plans/standard-24c64g7w/values.yaml @@ -18,8 +18,8 @@ web: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow scheduler parameters @@ -37,8 +37,8 @@ scheduler: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow worker parameters @@ -56,5 +56,5 @@ worker: cpu: 24 memory: 64Gi requests: - cpu: 12 - memory: 32Gi + cpu: 3 + memory: 8Gi diff --git a/addons/airflow/2/plans/standard-2c4g2w/values.yaml b/addons/airflow/2/plans/standard-2c4g2w/values.yaml index b78fc8d1..e146f090 100644 --- a/addons/airflow/2/plans/standard-2c4g2w/values.yaml +++ b/addons/airflow/2/plans/standard-2c4g2w/values.yaml @@ -15,11 +15,11 @@ web: ## resources: limits: - cpu: 2000m + cpu: 2 memory: 4Gi requests: - cpu: 1000m - memory: 2Gi + cpu: 250m + memory: 512Mi ## @section Airflow scheduler parameters @@ -34,11 +34,11 @@ scheduler: ## resources: limits: - cpu: 2000m + cpu: 2 memory: 4Gi requests: - cpu: 1000m - memory: 2Gi + cpu: 250m + memory: 512Mi ## @section Airflow worker parameters @@ -53,8 +53,8 @@ worker: ## resources: limits: - cpu: 
2000m + cpu: 2 memory: 4Gi requests: - cpu: 1000m - memory: 2Gi + cpu: 250m + memory: 512Mi diff --git a/addons/airflow/2/plans/standard-4c16g2w/values.yaml b/addons/airflow/2/plans/standard-4c16g2w/values.yaml index 02e23a4e..4531cacb 100644 --- a/addons/airflow/2/plans/standard-4c16g2w/values.yaml +++ b/addons/airflow/2/plans/standard-4c16g2w/values.yaml @@ -18,8 +18,8 @@ web: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow scheduler parameters @@ -37,8 +37,8 @@ scheduler: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow worker parameters @@ -56,5 +56,5 @@ worker: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 0.5 + memory: 2Gi diff --git a/addons/airflow/2/plans/standard-4c8g2w/values.yaml b/addons/airflow/2/plans/standard-4c8g2w/values.yaml index d4b32254..02ae8bcf 100644 --- a/addons/airflow/2/plans/standard-4c8g2w/values.yaml +++ b/addons/airflow/2/plans/standard-4c8g2w/values.yaml @@ -15,11 +15,11 @@ web: ## resources: limits: - cpu: 4000m + cpu: 4 memory: 8Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow scheduler parameters @@ -34,11 +34,11 @@ scheduler: ## resources: limits: - cpu: 4000m + cpu: 4 memory: 8Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow worker parameters @@ -53,8 +53,8 @@ worker: ## resources: limits: - cpu: 4000m + cpu: 4 memory: 8Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 0.5 + memory: 1Gi diff --git a/addons/airflow/2/plans/standard-8c32g2w/values.yaml b/addons/airflow/2/plans/standard-8c32g2w/values.yaml index ca0f5f5b..f3966612 100644 --- a/addons/airflow/2/plans/standard-8c32g2w/values.yaml +++ b/addons/airflow/2/plans/standard-8c32g2w/values.yaml @@ -18,8 +18,8 @@ web: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow scheduler parameters @@ -37,8 +37,8 @@ scheduler: cpu: 4 memory: 8Gi requests: - cpu: 
2 - memory: 4Gi + cpu: 0.5 + memory: 1Gi ## @section Airflow worker parameters @@ -56,5 +56,5 @@ worker: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi diff --git a/addons/flink/1.17/plans/standard-2c4g5w/values.yaml b/addons/flink/1.17/plans/standard-2c4g5w/values.yaml index d52805f3..4068bcc6 100644 --- a/addons/flink/1.17/plans/standard-2c4g5w/values.yaml +++ b/addons/flink/1.17/plans/standard-2c4g5w/values.yaml @@ -18,8 +18,8 @@ jobmanager: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 200m + memory: 512Mi ## Apache Flink jobmanager.service parameters ## service: @@ -43,8 +43,8 @@ taskmanager: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 200m + memory: 512Mi ## @param taskmanager.extraEnvVars Extra environment variables to be set on flink container extraEnvVars: # taskmanager.numberOfTaskSlots diff --git a/addons/flink/1.17/plans/standard-4c8g5w/values.yaml b/addons/flink/1.17/plans/standard-4c8g5w/values.yaml index dfdac959..1b24a3a9 100644 --- a/addons/flink/1.17/plans/standard-4c8g5w/values.yaml +++ b/addons/flink/1.17/plans/standard-4c8g5w/values.yaml @@ -18,8 +18,8 @@ jobmanager: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## Apache Flink jobmanager.service parameters ## service: @@ -43,8 +43,8 @@ taskmanager: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## @param taskmanager.extraEnvVars Extra environment variables to be set on flink container extraEnvVars: # taskmanager.numberOfTaskSlots diff --git a/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml b/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml index 5a6ce4d7..d2f33263 100644 --- a/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml @@ -17,8 +17,8 @@ controller: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## Enable persistence using Persistent Volume Claims ## ref: 
https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml index c63052bc..10e61b73 100644 --- a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml @@ -14,11 +14,11 @@ controller: heapOpts: -Xmx1024m -Xms1024m resources: limits: - cpu: 1000m + cpu: 1 memory: 2Gi requests: - cpu: 500m - memory: 1Gi + cpu: 200m + memory: 256Mi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## diff --git a/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml b/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml index 265b0b59..240dcbcf 100644 --- a/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml @@ -17,8 +17,8 @@ controller: cpu: 24 memory: 64Gi requests: - cpu: 12 - memory: 32Gi + cpu: 3 + memory: 8Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## diff --git a/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml b/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml index 70c72ecd..f3255063 100644 --- a/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml @@ -14,11 +14,11 @@ controller: heapOpts: -Xmx2048m -Xms2048m resources: limits: - cpu: 2000m + cpu: 2 memory: 4Gi requests: - cpu: 1000m - memory: 2Gi + cpu: 400m + memory: 512Mi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## diff --git a/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml b/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml index dd532172..0c8f9cf3 100644 --- a/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml @@ -17,8 +17,8 @@
controller: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## diff --git a/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml b/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml index 0219fe4e..64e7a47d 100644 --- a/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml @@ -17,8 +17,8 @@ controller: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## diff --git a/addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml b/addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml index 52424a5b..17ca16de 100644 --- a/addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml +++ b/addons/kvrocks/2.10/plans/standard-16c32g1024/values.yaml @@ -16,8 +16,8 @@ master: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -120,8 +120,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -134,5 +134,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml b/addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml index 80019be2..2a3dc736 100644 --- a/addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml +++ 
b/addons/kvrocks/2.10/plans/standard-1c2g64/values.yaml @@ -16,8 +16,8 @@ master: cpu: 1 memory: 2Gi requests: - cpu: 500m - memory: 1Gi + cpu: 100m + memory: 128Mi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 1 memory: 2Gi requests: - cpu: 500m - memory: 1Gi + cpu: 100m + memory: 256Mi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ @@ -121,8 +121,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -135,5 +135,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml b/addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml index 4fd489b1..3302b6ed 100644 --- a/addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml +++ b/addons/kvrocks/2.10/plans/standard-2c4g128/values.yaml @@ -16,8 +16,8 @@ master: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 200m + memory: 512Mi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 200m + memory: 512Mi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ @@ -121,8 +121,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -135,5 +135,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml b/addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml index 024daf43..228347e3 100644 --- 
a/addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml +++ b/addons/kvrocks/2.10/plans/standard-4c8g256/values.yaml @@ -16,8 +16,8 @@ master: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -120,8 +120,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -134,5 +134,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml b/addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml index 5d563525..dd5ff09e 100644 --- a/addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml +++ b/addons/kvrocks/2.10/plans/standard-8c16g512/values.yaml @@ -16,8 +16,8 @@ master: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -120,8 +120,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -134,5 +134,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/kvrocks/2.8/plans/standard-16c32g1024/values.yaml 
b/addons/kvrocks/2.8/plans/standard-16c32g1024/values.yaml index 52424a5b..17ca16de 100644 --- a/addons/kvrocks/2.8/plans/standard-16c32g1024/values.yaml +++ b/addons/kvrocks/2.8/plans/standard-16c32g1024/values.yaml @@ -16,8 +16,8 @@ master: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -120,8 +120,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -134,5 +134,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/kvrocks/2.8/plans/standard-1c2g64/values.yaml b/addons/kvrocks/2.8/plans/standard-1c2g64/values.yaml index 80019be2..c2e0a93b 100644 --- a/addons/kvrocks/2.8/plans/standard-1c2g64/values.yaml +++ b/addons/kvrocks/2.8/plans/standard-1c2g64/values.yaml @@ -16,8 +16,8 @@ master: cpu: 1 memory: 2Gi requests: - cpu: 500m - memory: 1Gi + cpu: 100m + memory: 256Mi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 1 memory: 2Gi requests: - cpu: 500m - memory: 1Gi + cpu: 100m + memory: 256Mi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ @@ -121,8 +121,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -135,5 +135,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 200m + memory: 128Mi \ No newline at end of file diff --git 
a/addons/kvrocks/2.8/plans/standard-2c4g128/values.yaml b/addons/kvrocks/2.8/plans/standard-2c4g128/values.yaml index 4fd489b1..3302b6ed 100644 --- a/addons/kvrocks/2.8/plans/standard-2c4g128/values.yaml +++ b/addons/kvrocks/2.8/plans/standard-2c4g128/values.yaml @@ -16,8 +16,8 @@ master: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 200m + memory: 512Mi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 200m + memory: 512Mi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ @@ -121,8 +121,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -135,5 +135,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/kvrocks/2.8/plans/standard-4c8g256/values.yaml b/addons/kvrocks/2.8/plans/standard-4c8g256/values.yaml index 024daf43..228347e3 100644 --- a/addons/kvrocks/2.8/plans/standard-4c8g256/values.yaml +++ b/addons/kvrocks/2.8/plans/standard-4c8g256/values.yaml @@ -16,8 +16,8 @@ master: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -120,8 +120,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -134,5 +134,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi 
\ No newline at end of file diff --git a/addons/kvrocks/2.8/plans/standard-8c16g512/values.yaml b/addons/kvrocks/2.8/plans/standard-8c16g512/values.yaml index 5d563525..dd5ff09e 100644 --- a/addons/kvrocks/2.8/plans/standard-8c16g512/values.yaml +++ b/addons/kvrocks/2.8/plans/standard-8c16g512/values.yaml @@ -16,8 +16,8 @@ master: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -61,8 +61,8 @@ replica: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## Persistence Parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -120,8 +120,8 @@ sentinel: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi proxy: ## Kvrocks; Sentinel resource requests and limits @@ -134,5 +134,5 @@ proxy: cpu: 1 memory: 1Gi requests: - cpu: 500m - memory: 512Mi \ No newline at end of file + cpu: 100m + memory: 128Mi \ No newline at end of file diff --git a/addons/minio/2023/plans/standard-v4s1024/values.yaml b/addons/minio/2023/plans/standard-v4s1024/values.yaml index fd42862f..cb2dff66 100644 --- a/addons/minio/2023/plans/standard-v4s1024/values.yaml +++ b/addons/minio/2023/plans/standard-v4s1024/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 500m + memory: 2Gi ## @section Persistence parameters diff --git a/addons/minio/2023/plans/standard-v4s128/values.yaml b/addons/minio/2023/plans/standard-v4s128/values.yaml index 49a6b3e8..f015dae0 100644 --- a/addons/minio/2023/plans/standard-v4s128/values.yaml +++ b/addons/minio/2023/plans/standard-v4s128/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 1 memory: 2048Mi requests: - cpu: 500m - memory: 1024Mi + cpu: 200m + memory: 256Mi ## @section Persistence parameters diff --git a/addons/minio/2023/plans/standard-v4s2048/values.yaml 
b/addons/minio/2023/plans/standard-v4s2048/values.yaml index 539a6e9f..4c12e8fc 100644 --- a/addons/minio/2023/plans/standard-v4s2048/values.yaml +++ b/addons/minio/2023/plans/standard-v4s2048/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 500m + memory: 2Gi ## @section Persistence parameters diff --git a/addons/minio/2023/plans/standard-v4s256/values.yaml b/addons/minio/2023/plans/standard-v4s256/values.yaml index 143c7bb3..5c8cda9d 100644 --- a/addons/minio/2023/plans/standard-v4s256/values.yaml +++ b/addons/minio/2023/plans/standard-v4s256/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 400m + memory: 512Mi ## @section Persistence parameters diff --git a/addons/minio/2023/plans/standard-v4s3096/values.yaml b/addons/minio/2023/plans/standard-v4s3096/values.yaml index 64b0212d..69487611 100644 --- a/addons/minio/2023/plans/standard-v4s3096/values.yaml +++ b/addons/minio/2023/plans/standard-v4s3096/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 500m + memory: 2Gi ## @section Persistence parameters diff --git a/addons/minio/2023/plans/standard-v4s32/values.yaml b/addons/minio/2023/plans/standard-v4s32/values.yaml index d0fbaa25..7a4d7ae2 100644 --- a/addons/minio/2023/plans/standard-v4s32/values.yaml +++ b/addons/minio/2023/plans/standard-v4s32/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 500m memory: 512Mi requests: - cpu: 250m - memory: 256Mi + cpu: 100m + memory: 128Mi ## @section Persistence parameters diff --git a/addons/minio/2023/plans/standard-v4s512/values.yaml b/addons/minio/2023/plans/standard-v4s512/values.yaml index 20d2e138..9dc1c82c 100644 --- a/addons/minio/2023/plans/standard-v4s512/values.yaml +++ b/addons/minio/2023/plans/standard-v4s512/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 2 memory: 8Gi requests: - cpu: 1 - memory: 4Gi + cpu: 400m + memory: 1Gi ## @section Persistence parameters 
diff --git a/addons/minio/2023/plans/standard-v4s64/values.yaml b/addons/minio/2023/plans/standard-v4s64/values.yaml index 584e82b1..ebead3a5 100644 --- a/addons/minio/2023/plans/standard-v4s64/values.yaml +++ b/addons/minio/2023/plans/standard-v4s64/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 500m memory: 1024Mi requests: - cpu: 250m - memory: 512Mi + cpu: 100m + memory: 256Mi ## @section Persistence parameters diff --git a/addons/minio/2023/plans/standard-v6d4s1T/values.yaml b/addons/minio/2023/plans/standard-v6d4s1T/values.yaml index a892572d..6646e0f4 100644 --- a/addons/minio/2023/plans/standard-v6d4s1T/values.yaml +++ b/addons/minio/2023/plans/standard-v6d4s1T/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## MinIO® statefulset parameters ## Only when mode is 'distributed' diff --git a/addons/minio/2023/plans/standard-v8d4s1T/values.yaml b/addons/minio/2023/plans/standard-v8d4s1T/values.yaml index 90531616..ccbf5ca6 100644 --- a/addons/minio/2023/plans/standard-v8d4s1T/values.yaml +++ b/addons/minio/2023/plans/standard-v8d4s1T/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## MinIO® statefulset parameters ## Only when mode is 'distributed' diff --git a/addons/minio/2023/plans/standard-v8d4s2T/values.yaml b/addons/minio/2023/plans/standard-v8d4s2T/values.yaml index 46e19fef..2571bfde 100644 --- a/addons/minio/2023/plans/standard-v8d4s2T/values.yaml +++ b/addons/minio/2023/plans/standard-v8d4s2T/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## MinIO® statefulset parameters ## Only when mode is 'distributed' diff --git a/addons/minio/2023/plans/standard-v8d4s3T/values.yaml b/addons/minio/2023/plans/standard-v8d4s3T/values.yaml index 82bde774..9ff6bc6f 100644 --- a/addons/minio/2023/plans/standard-v8d4s3T/values.yaml +++ 
b/addons/minio/2023/plans/standard-v8d4s3T/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8 memory: 64Gi requests: - cpu: 4 - memory: 32Gi + cpu: 1 + memory: 8Gi ## MinIO® statefulset parameters ## Only when mode is 'distributed' diff --git a/addons/opensearch/2.10/plans/standard-2c4g32/values.yaml b/addons/opensearch/2.10/plans/standard-2c4g32/values.yaml index 157ca24b..3d7e226e 100644 --- a/addons/opensearch/2.10/plans/standard-2c4g32/values.yaml +++ b/addons/opensearch/2.10/plans/standard-2c4g32/values.yaml @@ -24,8 +24,8 @@ master: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 200m + memory: 512Mi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 500m requests: - memory: 1024Mi - cpu: 250m + memory: 512Mi + cpu: 100m diff --git a/addons/opensearch/2.10/plans/standard-2c4g64/values.yaml b/addons/opensearch/2.10/plans/standard-2c4g64/values.yaml index 10a0106f..1d7f5782 100644 --- a/addons/opensearch/2.10/plans/standard-2c4g64/values.yaml +++ b/addons/opensearch/2.10/plans/standard-2c4g64/values.yaml @@ -24,8 +24,8 @@ master: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 512Mi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. 
@@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 500m requests: - memory: 1024Mi - cpu: 250m + memory: 512Mi + cpu: 100m diff --git a/addons/opensearch/2.10/plans/standard-4c16g256/values.yaml b/addons/opensearch/2.10/plans/standard-4c16g256/values.yaml index 0b15b099..808b5cec 100644 --- a/addons/opensearch/2.10/plans/standard-4c16g256/values.yaml +++ b/addons/opensearch/2.10/plans/standard-4c16g256/values.yaml @@ -24,8 +24,8 @@ master: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 500m + memory: 2Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 1000m requests: - memory: 1024Mi - cpu: 500m + memory: 512Mi + cpu: 200m diff --git a/addons/opensearch/2.10/plans/standard-4c16g512/values.yaml b/addons/opensearch/2.10/plans/standard-4c16g512/values.yaml index 8abebf0c..95d308b1 100644 --- a/addons/opensearch/2.10/plans/standard-4c16g512/values.yaml +++ b/addons/opensearch/2.10/plans/standard-4c16g512/values.yaml @@ -24,8 +24,8 @@ master: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 500m + memory: 2Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 3072Mi cpu: 1500m requests: - memory: 1024Mi + memory: 1Gi cpu: 150m diff --git a/addons/opensearch/2.10/plans/standard-4c8g128/values.yaml b/addons/opensearch/2.10/plans/standard-4c8g128/values.yaml index 21e4c16c..8c3601ef 100644 --- a/addons/opensearch/2.10/plans/standard-4c8g128/values.yaml +++ b/addons/opensearch/2.10/plans/standard-4c8g128/values.yaml @@ -24,8 +24,8 @@ master: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## @param master.heapSize OpenSearch master-eligible node heap size. 
## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 1000m requests: - memory: 1024Mi - cpu: 500m + memory: 512Mi + cpu: 200m diff --git a/addons/opensearch/2.10/plans/standard-8c32g1024/values.yaml b/addons/opensearch/2.10/plans/standard-8c32g1024/values.yaml index 5829f62a..28386fa1 100644 --- a/addons/opensearch/2.10/plans/standard-8c32g1024/values.yaml +++ b/addons/opensearch/2.10/plans/standard-8c32g1024/values.yaml @@ -24,8 +24,8 @@ master: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 4096Mi cpu: 2000m requests: - memory: 2058Mi - cpu: 1000m + memory: 1Gi + cpu: 500m diff --git a/addons/opensearch/2.10/plans/standard-8c32g2048/values.yaml b/addons/opensearch/2.10/plans/standard-8c32g2048/values.yaml index 50a21715..9a51cb78 100644 --- a/addons/opensearch/2.10/plans/standard-8c32g2048/values.yaml +++ b/addons/opensearch/2.10/plans/standard-8c32g2048/values.yaml @@ -24,8 +24,8 @@ master: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. 
@@ -153,5 +153,5 @@ dashboards: memory: 4096Mi cpu: 2000m requests: - memory: 2058Mi - cpu: 1000m + memory: 512Mi + cpu: 500m diff --git a/addons/opensearch/2.10/plans/standard-8c32g768/values.yaml b/addons/opensearch/2.10/plans/standard-8c32g768/values.yaml index 01d271dc..a349130e 100644 --- a/addons/opensearch/2.10/plans/standard-8c32g768/values.yaml +++ b/addons/opensearch/2.10/plans/standard-8c32g768/values.yaml @@ -24,8 +24,8 @@ master: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 3072Mi cpu: 1500m requests: - memory: 1024Mi - cpu: 750m + memory: 512Mi + cpu: 500m diff --git a/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml b/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml index 157ca24b..9cb5b8ee 100644 --- a/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml +++ b/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml @@ -24,8 +24,8 @@ master: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 512Mi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 500m requests: - memory: 1024Mi - cpu: 250m + memory: 512Mi + cpu: 100m diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml b/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml index 10a0106f..a1c5641b 100644 --- a/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml +++ b/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml @@ -24,8 +24,8 @@ master: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 512Mi ## @param master.heapSize OpenSearch master-eligible node heap size. 
## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 500m requests: - memory: 1024Mi - cpu: 250m + memory: 512Mi + cpu: 200m diff --git a/addons/opensearch/3.0/plans/standard-4c16g256/values.yaml b/addons/opensearch/3.0/plans/standard-4c16g256/values.yaml index 0b15b099..808b5cec 100644 --- a/addons/opensearch/3.0/plans/standard-4c16g256/values.yaml +++ b/addons/opensearch/3.0/plans/standard-4c16g256/values.yaml @@ -24,8 +24,8 @@ master: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 500m + memory: 2Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 1000m requests: - memory: 1024Mi - cpu: 500m + memory: 512Mi + cpu: 200m diff --git a/addons/opensearch/3.0/plans/standard-4c16g512/values.yaml b/addons/opensearch/3.0/plans/standard-4c16g512/values.yaml index 8abebf0c..2ed4d38a 100644 --- a/addons/opensearch/3.0/plans/standard-4c16g512/values.yaml +++ b/addons/opensearch/3.0/plans/standard-4c16g512/values.yaml @@ -24,8 +24,8 @@ master: cpu: 4 memory: 16Gi requests: - cpu: 2 - memory: 8Gi + cpu: 500m + memory: 2Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. 
@@ -153,5 +153,5 @@ dashboards: memory: 3072Mi cpu: 1500m requests: - memory: 1024Mi - cpu: 150m + memory: 512Mi + cpu: 200m diff --git a/addons/opensearch/3.0/plans/standard-4c8g128/values.yaml b/addons/opensearch/3.0/plans/standard-4c8g128/values.yaml index 21e4c16c..8c3601ef 100644 --- a/addons/opensearch/3.0/plans/standard-4c8g128/values.yaml +++ b/addons/opensearch/3.0/plans/standard-4c8g128/values.yaml @@ -24,8 +24,8 @@ master: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -153,5 +153,5 @@ dashboards: memory: 2048Mi cpu: 1000m requests: - memory: 1024Mi - cpu: 500m + memory: 512Mi + cpu: 200m diff --git a/addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml b/addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml index 5829f62a..2b710e89 100644 --- a/addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml +++ b/addons/opensearch/3.0/plans/standard-8c32g1024/values.yaml @@ -24,8 +24,8 @@ master: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. 
@@ -150,8 +150,8 @@ dashboards: ## resources: limits: - memory: 4096Mi - cpu: 2000m + memory: 4Gi + cpu: 2 requests: - memory: 2058Mi - cpu: 1000m + memory: 512Mi + cpu: 500m diff --git a/addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml b/addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml index 50a21715..abf7365e 100644 --- a/addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml +++ b/addons/opensearch/3.0/plans/standard-8c32g2048/values.yaml @@ -24,8 +24,8 @@ master: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. @@ -150,8 +150,8 @@ dashboards: ## resources: limits: - memory: 4096Mi - cpu: 2000m + memory: 4Gi + cpu: 2 requests: - memory: 2058Mi - cpu: 1000m + memory: 512Mi + cpu: 500m diff --git a/addons/opensearch/3.0/plans/standard-8c32g768/values.yaml b/addons/opensearch/3.0/plans/standard-8c32g768/values.yaml index 01d271dc..bdceb83a 100644 --- a/addons/opensearch/3.0/plans/standard-8c32g768/values.yaml +++ b/addons/opensearch/3.0/plans/standard-8c32g768/values.yaml @@ -24,8 +24,8 @@ master: cpu: 8 memory: 32Gi requests: - cpu: 4 - memory: 16Gi + cpu: 1 + memory: 4Gi ## @param master.heapSize OpenSearch master-eligible node heap size. ## Note: The recommended heapSize is half of the container's memory. ## If omitted, it will be automatically set. 
@@ -153,5 +153,5 @@ dashboards: memory: 3072Mi cpu: 1500m requests: - memory: 1024Mi - cpu: 750m + memory: 512Mi + cpu: 200m diff --git a/addons/rabbitmq/3.12/plans/standard-16c32g3w/values.yaml b/addons/rabbitmq/3.12/plans/standard-16c32g3w/values.yaml index c951cdbf..c6c4695d 100644 --- a/addons/rabbitmq/3.12/plans/standard-16c32g3w/values.yaml +++ b/addons/rabbitmq/3.12/plans/standard-16c32g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git a/addons/rabbitmq/3.12/plans/standard-2c4g3w/values.yaml b/addons/rabbitmq/3.12/plans/standard-2c4g3w/values.yaml index c9e6dbdf..9e83ae0d 100644 --- a/addons/rabbitmq/3.12/plans/standard-2c4g3w/values.yaml +++ b/addons/rabbitmq/3.12/plans/standard-2c4g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 512Mi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git a/addons/rabbitmq/3.12/plans/standard-4c8g3w/values.yaml b/addons/rabbitmq/3.12/plans/standard-4c8g3w/values.yaml index 1db3c36e..1df30450 100644 --- a/addons/rabbitmq/3.12/plans/standard-4c8g3w/values.yaml +++ b/addons/rabbitmq/3.12/plans/standard-4c8g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git a/addons/rabbitmq/3.12/plans/standard-8c16g3w/values.yaml b/addons/rabbitmq/3.12/plans/standard-8c16g3w/values.yaml index e50a93f0..8391871a 100644 --- a/addons/rabbitmq/3.12/plans/standard-8c16g3w/values.yaml +++ b/addons/rabbitmq/3.12/plans/standard-8c16g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git 
a/addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml index c951cdbf..c6c4695d 100644 --- a/addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml +++ b/addons/rabbitmq/4.0/plans/standard-16c32g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git a/addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml index c9e6dbdf..9e83ae0d 100644 --- a/addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml +++ b/addons/rabbitmq/4.0/plans/standard-2c4g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 512Mi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git a/addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml index 1db3c36e..1df30450 100644 --- a/addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml +++ b/addons/rabbitmq/4.0/plans/standard-4c8g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git a/addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml b/addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml index e50a93f0..8391871a 100644 --- a/addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml +++ b/addons/rabbitmq/4.0/plans/standard-8c16g3w/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## @param replicaCount Number of RabbitMQ replicas to deploy ## diff --git a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml index 02861f15..d5774764 100644 --- 
a/addons/redis-cluster/7.0/plans/standard-16384/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-16384/values.yaml @@ -29,8 +29,8 @@ redis: cpu: 2000m memory: 16Gi requests: - cpu: 1000m - memory: 8Gi + cpu: 500m + memory: 2Gi ## @section Proxy® statefulset parameters ## @@ -51,5 +51,5 @@ proxy: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-2048/values.yaml b/addons/redis-cluster/7.0/plans/standard-2048/values.yaml index ad1cc06c..2366a90f 100644 --- a/addons/redis-cluster/7.0/plans/standard-2048/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-2048/values.yaml @@ -29,8 +29,8 @@ redis: cpu: 500m memory: 2Gi requests: - cpu: 250m - memory: 1Gi + cpu: 200m + memory: 512Mi ## @section Proxy® statefulset parameters ## @@ -51,5 +51,5 @@ proxy: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml index c48bd12e..986c6a7a 100644 --- a/addons/redis-cluster/7.0/plans/standard-32768/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-32768/values.yaml @@ -26,11 +26,11 @@ redis: ## resources: limits: - cpu: 4000m + cpu: 4 memory: 32Gi requests: - cpu: 2000m - memory: 16Gi + cpu: 1 + memory: 4Gi ## @section Proxy® statefulset parameters ## @@ -51,5 +51,5 @@ proxy: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml index 9c5f8c8f..cf810aa2 100644 --- a/addons/redis-cluster/7.0/plans/standard-4096/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-4096/values.yaml @@ -30,7 +30,7 @@ redis: memory: 4Gi requests: cpu: 500m - memory: 2Gi + memory: 512Mi ## @section Proxy® statefulset parameters ## @@ -51,6 +51,6 @@ proxy: cpu: 2 memory: 4Gi requests: - cpu: 1 - 
memory: 2Gi + cpu: 500m + memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml index 07b17e0a..4ba362d3 100644 --- a/addons/redis-cluster/7.0/plans/standard-65536/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-65536/values.yaml @@ -32,11 +32,11 @@ redis: ## resources: limits: - cpu: 4000m + cpu: 4 memory: 64Gi requests: - cpu: 2000m - memory: 32Gi + cpu: 1 + memory: 8Gi ## @section Proxy® statefulset parameters ## @@ -57,5 +57,5 @@ proxy: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 1Gi diff --git a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml index 9e57254f..187d7214 100644 --- a/addons/redis-cluster/7.0/plans/standard-8192/values.yaml +++ b/addons/redis-cluster/7.0/plans/standard-8192/values.yaml @@ -29,8 +29,8 @@ redis: cpu: 2000m memory: 8Gi requests: - cpu: 1000m - memory: 4Gi + cpu: 500m + memory: 1Gi ## @section Proxy® statefulset parameters ## @@ -51,5 +51,5 @@ proxy: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 1Gi diff --git a/addons/redis/7.0/plans/standard-1024/values.yaml b/addons/redis/7.0/plans/standard-1024/values.yaml index 58490a00..b7159eef 100644 --- a/addons/redis/7.0/plans/standard-1024/values.yaml +++ b/addons/redis/7.0/plans/standard-1024/values.yaml @@ -17,7 +17,7 @@ master: memory: 1024Mi requests: cpu: 100m - memory: 512Mi + memory: 128Mi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/redis/7.0/plans/standard-131072/values.yaml b/addons/redis/7.0/plans/standard-131072/values.yaml index 3c92b5f7..54ed812f 100644 --- a/addons/redis/7.0/plans/standard-131072/values.yaml +++ b/addons/redis/7.0/plans/standard-131072/values.yaml @@ -16,8 +16,8 @@ master: cpu: 4 memory: 128Gi requests: - cpu: 2 - memory: 64Gi + cpu: 1 + memory: 16Gi ## Persistence 
parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/redis/7.0/plans/standard-16384/values.yaml b/addons/redis/7.0/plans/standard-16384/values.yaml index cf9c77cd..9e15d269 100644 --- a/addons/redis/7.0/plans/standard-16384/values.yaml +++ b/addons/redis/7.0/plans/standard-16384/values.yaml @@ -16,8 +16,8 @@ master: cpu: 1000m memory: 16Gi requests: - cpu: 500m - memory: 8Gi + cpu: 200m + memory: 2Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/redis/7.0/plans/standard-2048/values.yaml b/addons/redis/7.0/plans/standard-2048/values.yaml index a7ceac41..c56f6249 100644 --- a/addons/redis/7.0/plans/standard-2048/values.yaml +++ b/addons/redis/7.0/plans/standard-2048/values.yaml @@ -16,8 +16,8 @@ master: cpu: 300m memory: 2Gi requests: - cpu: 150m - memory: 1Gi + cpu: 100m + memory: 512Mi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/redis/7.0/plans/standard-32768/values.yaml b/addons/redis/7.0/plans/standard-32768/values.yaml index 0b83c105..863044ca 100644 --- a/addons/redis/7.0/plans/standard-32768/values.yaml +++ b/addons/redis/7.0/plans/standard-32768/values.yaml @@ -16,8 +16,8 @@ master: cpu: 1000m memory: 32Gi requests: - cpu: 500m - memory: 16Gi + cpu: 200m + memory: 4Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/redis/7.0/plans/standard-4096/values.yaml b/addons/redis/7.0/plans/standard-4096/values.yaml index eff0868a..5a6c3e14 100644 --- a/addons/redis/7.0/plans/standard-4096/values.yaml +++ b/addons/redis/7.0/plans/standard-4096/values.yaml @@ -16,8 +16,8 @@ master: cpu: 500m memory: 4Gi requests: - cpu: 250m - memory: 2Gi + cpu: 200m + memory: 512Mi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/redis/7.0/plans/standard-65536/values.yaml 
b/addons/redis/7.0/plans/standard-65536/values.yaml index b47da715..baffeec9 100644 --- a/addons/redis/7.0/plans/standard-65536/values.yaml +++ b/addons/redis/7.0/plans/standard-65536/values.yaml @@ -16,8 +16,8 @@ master: cpu: 2000m memory: 64Gi requests: - cpu: 1000m - memory: 32Gi + cpu: 500m + memory: 8Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/redis/7.0/plans/standard-8192/values.yaml b/addons/redis/7.0/plans/standard-8192/values.yaml index 8c4bc3cb..69a96a3a 100644 --- a/addons/redis/7.0/plans/standard-8192/values.yaml +++ b/addons/redis/7.0/plans/standard-8192/values.yaml @@ -16,8 +16,8 @@ master: cpu: 1000m memory: 8Gi requests: - cpu: 500m - memory: 4Gi + cpu: 200m + memory: 1Gi ## Persistence parameters ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/spark/3.4/plans/standard-16c32g5w/values.yaml b/addons/spark/3.4/plans/standard-16c32g5w/values.yaml index e4f2397a..839dc833 100644 --- a/addons/spark/3.4/plans/standard-16c32g5w/values.yaml +++ b/addons/spark/3.4/plans/standard-16c32g5w/values.yaml @@ -31,8 +31,8 @@ master: cpu: 16 memory: 32Gi requests: - cpu: 8 - memory: 16Gi + cpu: 2 + memory: 4Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -80,11 +80,11 @@ worker: ## resources: limits: - cpu: 16000m + cpu: 16 memory: 32Gi requests: - cpu: 4000m - memory: 16Gi + cpu: 2 + memory: 4Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/spark/3.4/plans/standard-16c64g5w/values.yaml b/addons/spark/3.4/plans/standard-16c64g5w/values.yaml index 3940f0de..bd63e02a 100644 --- a/addons/spark/3.4/plans/standard-16c64g5w/values.yaml +++ b/addons/spark/3.4/plans/standard-16c64g5w/values.yaml @@ -31,8 +31,8 @@ master: cpu: 16 memory: 64Gi requests: - cpu: 4 - memory: 16Gi + cpu: 2 + 
memory: 8Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -83,8 +83,8 @@ worker: cpu: 16 memory: 64Gi requests: - cpu: 4 - memory: 16Gi + cpu: 2 + memory: 8Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/spark/3.4/plans/standard-1c2g2w/values.yaml b/addons/spark/3.4/plans/standard-1c2g2w/values.yaml index c7653f7a..f25cb9d0 100644 --- a/addons/spark/3.4/plans/standard-1c2g2w/values.yaml +++ b/addons/spark/3.4/plans/standard-1c2g2w/values.yaml @@ -31,8 +31,8 @@ master: cpu: 1000m memory: 2Gi requests: - cpu: 500m - memory: 1Gi + cpu: 200m + memory: 512Mi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -84,8 +84,8 @@ worker: cpu: 1000m memory: 2Gi requests: - cpu: 500m - memory: 1Gi + cpu: 200m + memory: 512Mi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/spark/3.4/plans/standard-24c48g5w/values.yaml b/addons/spark/3.4/plans/standard-24c48g5w/values.yaml index 9ef58f5e..52fa043f 100644 --- a/addons/spark/3.4/plans/standard-24c48g5w/values.yaml +++ b/addons/spark/3.4/plans/standard-24c48g5w/values.yaml @@ -31,8 +31,8 @@ master: cpu: 24 memory: 48Gi requests: - cpu: 12 - memory: 24Gi + cpu: 3 + memory: 6Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -80,11 +80,11 @@ worker: ## resources: limits: - cpu: 24000m + cpu: 24 memory: 48Gi requests: - cpu: 8000m - memory: 24Gi + cpu: 3 + memory: 6Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/spark/3.4/plans/standard-2c4g2w/values.yaml b/addons/spark/3.4/plans/standard-2c4g2w/values.yaml index 
d896e2b4..8fedf8ed 100644 --- a/addons/spark/3.4/plans/standard-2c4g2w/values.yaml +++ b/addons/spark/3.4/plans/standard-2c4g2w/values.yaml @@ -31,8 +31,8 @@ master: cpu: 2 memory: 4Gi requests: - cpu: 1 - memory: 2Gi + cpu: 500m + memory: 512Mi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -80,11 +80,11 @@ worker: ## resources: limits: - cpu: 2000m + cpu: 2 memory: 4Gi requests: - cpu: 1000m - memory: 2Gi + cpu: 500m + memory: 512Mi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/spark/3.4/plans/standard-4c8g3w/values.yaml b/addons/spark/3.4/plans/standard-4c8g3w/values.yaml index fcc2d436..47adb297 100644 --- a/addons/spark/3.4/plans/standard-4c8g3w/values.yaml +++ b/addons/spark/3.4/plans/standard-4c8g3w/values.yaml @@ -31,8 +31,8 @@ master: cpu: 4 memory: 8Gi requests: - cpu: 2 - memory: 4Gi + cpu: 500m + memory: 1Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -80,11 +80,11 @@ worker: ## resources: limits: - cpu: 4000m + cpu: 4 memory: 8Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 500m + memory: 1Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/spark/3.4/plans/standard-8c16g3w/values.yaml b/addons/spark/3.4/plans/standard-8c16g3w/values.yaml index e7bac965..dc738f40 100644 --- a/addons/spark/3.4/plans/standard-8c16g3w/values.yaml +++ b/addons/spark/3.4/plans/standard-8c16g3w/values.yaml @@ -31,8 +31,8 @@ master: cpu: 8 memory: 16Gi requests: - cpu: 4 - memory: 8Gi + cpu: 1 + memory: 2Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## @@ -81,11 +81,11 @@ worker: ## resources: limits: - cpu: 8000m + cpu: 8 memory: 16Gi requests: - cpu: 2000m - 
memory: 8Gi + cpu: 1 + memory: 2Gi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## diff --git a/addons/zookeeper/3.9/plans/standard-16c32g3w/values.yaml b/addons/zookeeper/3.9/plans/standard-16c32g3w/values.yaml index 00faa075..9e45943a 100644 --- a/addons/zookeeper/3.9/plans/standard-16c32g3w/values.yaml +++ b/addons/zookeeper/3.9/plans/standard-16c32g3w/values.yaml @@ -17,8 +17,8 @@ resources: memory: 32Gi cpu: 16 requests: - memory: 16Gi - cpu: 8 + memory: 4Gi + cpu: 2 ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims diff --git a/addons/zookeeper/3.9/plans/standard-1c2g3w/values.yaml b/addons/zookeeper/3.9/plans/standard-1c2g3w/values.yaml index 292f2e55..6a644875 100644 --- a/addons/zookeeper/3.9/plans/standard-1c2g3w/values.yaml +++ b/addons/zookeeper/3.9/plans/standard-1c2g3w/values.yaml @@ -17,7 +17,7 @@ resources: memory: 2Gi cpu: 1 requests: - memory: 1Gi + memory: 512Mi cpu: 500m ## @section Persistence parameters diff --git a/addons/zookeeper/3.9/plans/standard-2c4g3w/values.yaml b/addons/zookeeper/3.9/plans/standard-2c4g3w/values.yaml index 993be887..fad0556f 100644 --- a/addons/zookeeper/3.9/plans/standard-2c4g3w/values.yaml +++ b/addons/zookeeper/3.9/plans/standard-2c4g3w/values.yaml @@ -17,8 +17,8 @@ resources: memory: 4Gi cpu: 2 requests: - memory: 2Gi - cpu: 1 + memory: 512Mi + cpu: 500m ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims diff --git a/addons/zookeeper/3.9/plans/standard-2c4g5w/values.yaml b/addons/zookeeper/3.9/plans/standard-2c4g5w/values.yaml index b8dfc31c..091193fd 100644 --- a/addons/zookeeper/3.9/plans/standard-2c4g5w/values.yaml +++ b/addons/zookeeper/3.9/plans/standard-2c4g5w/values.yaml @@ -17,8 +17,8 @@ resources: memory: 4Gi cpu: 2 requests: - memory: 2Gi - cpu: 1 + memory: 512Mi + cpu: 500m ## @section Persistence parameters ## Enable persistence using Persistent Volume 
Claims diff --git a/addons/zookeeper/3.9/plans/standard-4c8g3w/values.yaml b/addons/zookeeper/3.9/plans/standard-4c8g3w/values.yaml index 72c1379e..6f80f721 100644 --- a/addons/zookeeper/3.9/plans/standard-4c8g3w/values.yaml +++ b/addons/zookeeper/3.9/plans/standard-4c8g3w/values.yaml @@ -17,8 +17,8 @@ resources: memory: 8Gi cpu: 4 requests: - memory: 4Gi - cpu: 2 + memory: 1Gi + cpu: 1 ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims diff --git a/addons/zookeeper/3.9/plans/standard-8c16g3w/values.yaml b/addons/zookeeper/3.9/plans/standard-8c16g3w/values.yaml index 7c8b37c8..3459b1a7 100644 --- a/addons/zookeeper/3.9/plans/standard-8c16g3w/values.yaml +++ b/addons/zookeeper/3.9/plans/standard-8c16g3w/values.yaml @@ -17,8 +17,8 @@ resources: memory: 16Gi cpu: 8 requests: - memory: 8Gi - cpu: 4 + memory: 2Gi + cpu: 1 ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims From a453dcc8f379ca6b765ea56eb0f012ee05b90645 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 10 Jul 2025 09:40:36 +0800 Subject: [PATCH 65/93] chore(redis): support auth config --- addons/redis/7.0/meta.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/addons/redis/7.0/meta.yaml b/addons/redis/7.0/meta.yaml index 74688fd6..659c86ba 100644 --- a/addons/redis/7.0/meta.yaml +++ b/addons/redis/7.0/meta.yaml @@ -15,9 +15,9 @@ instances_retrievable: true bindings_retrievable: true plan_updateable: true allow_parameters: -- name: "auth.password" +- name: "auth" required: false - description: "auth.password config for values.yaml" + description: "auth config for values.yaml" - name: "commonConfiguration" required: false description: "commonConfiguration config for values.yaml" From e5e42670fa17f818b078b60df47d8a1f5496e15a Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 1 Aug 2025 11:12:03 +0800 Subject: [PATCH 66/93] chore(addons): support externalTrafficPolicy config --- addons/airflow/2/meta.yaml | 3 
+++ addons/airflow/3/meta.yaml | 3 +++ addons/flink/1.17/meta.yaml | 6 ++++++ addons/kafka/3.6/meta.yaml | 3 +++ addons/kvrocks/2.10/meta.yaml | 9 +++++++++ addons/kvrocks/2.8/meta.yaml | 9 +++++++++ addons/minio/2023/meta.yaml | 3 +++ addons/opensearch/2.10/meta.yaml | 6 ++++++ addons/opensearch/3.0/meta.yaml | 6 ++++++ addons/redis-cluster/7.0/meta.yaml | 3 +++ addons/redis/7.0/meta.yaml | 9 +++++++++ addons/seaweedfs/3/meta.yaml | 9 +++++++++ addons/spark/3.4/meta.yaml | 3 +++ addons/zookeeper/3.9/meta.yaml | 3 +++ 14 files changed, 75 insertions(+) diff --git a/addons/airflow/2/meta.yaml b/addons/airflow/2/meta.yaml index 076efa04..0e562f43 100644 --- a/addons/airflow/2/meta.yaml +++ b/addons/airflow/2/meta.yaml @@ -66,6 +66,9 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "statsd.enabled" required: false description: "statsd enabled or not config for values.yaml" diff --git a/addons/airflow/3/meta.yaml b/addons/airflow/3/meta.yaml index 0ed38dc8..5a4e808a 100644 --- a/addons/airflow/3/meta.yaml +++ b/addons/airflow/3/meta.yaml @@ -87,6 +87,9 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "metrics.enabled" required: false description: "metrics enabled or not config for values.yaml" diff --git a/addons/flink/1.17/meta.yaml b/addons/flink/1.17/meta.yaml index 95f7b633..be6bd6f8 100644 --- a/addons/flink/1.17/meta.yaml +++ b/addons/flink/1.17/meta.yaml @@ -24,6 +24,9 @@ allow_parameters: - name: "jobmanager.service.type" required: false description: "jobmanager service type config for values.yaml" +- name: "jobmanager.service.externalTrafficPolicy" + required: false + 
description: "jobmanager service externalTrafficPolicy config for values.yaml" - name: "jobmanager.networkPolicy.allowNamespaces" required: false description: "jobmanager networkPolicy allowNamespaces config for values.yaml" @@ -36,6 +39,9 @@ allow_parameters: - name: "taskmanager.service.type" required: false description: "taskmanager service type config for values.yaml" +- name: "taskmanager.service.externalTrafficPolicy" + required: false + description: "taskmanager service externalTrafficPolicy config for values.yaml" - name: "taskmanager.networkPolicy.allowNamespaces" required: false description: "taskmanager networkPolicy allowNamespaces config for values.yaml" diff --git a/addons/kafka/3.6/meta.yaml b/addons/kafka/3.6/meta.yaml index 57d5503d..2becc60f 100644 --- a/addons/kafka/3.6/meta.yaml +++ b/addons/kafka/3.6/meta.yaml @@ -75,6 +75,9 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "externalAccess.enabled" required: false description: "externalAccess enabled or not config for values.yaml" diff --git a/addons/kvrocks/2.10/meta.yaml b/addons/kvrocks/2.10/meta.yaml index 5029ca8d..548017ec 100644 --- a/addons/kvrocks/2.10/meta.yaml +++ b/addons/kvrocks/2.10/meta.yaml @@ -30,6 +30,9 @@ allow_parameters: - name: "master.service.type" required: false description: "master service type config for values.yaml" +- name: "master.service.externalTrafficPolicy" + required: false + description: "master service externalTrafficPolicy config for values.yaml" - name: "master.nodeSelector" required: false description: "master nodeSelector config for values.yaml" @@ -39,12 +42,18 @@ allow_parameters: - name: "replica.service.type" required: false description: "replica service type config for values.yaml" +- name: "replica.service.externalTrafficPolicy" + required: false + 
description: "replica service externalTrafficPolicy config for values.yaml" - name: "sentinel.service.type" required: false description: "sentinel service type config for values.yaml" - name: "sentinel.extraEnvVars" required: false description: "sentinel extraEnvVars config for values.yaml" +- name: "sentinel.service.externalTrafficPolicy" + required: false + description: "sentinel service externalTrafficPolicy config for values.yaml" - name: "sentinel.enabled" required: false description: "sentinel enabled type config for values.yaml" diff --git a/addons/kvrocks/2.8/meta.yaml b/addons/kvrocks/2.8/meta.yaml index 711982d4..349a8fa6 100644 --- a/addons/kvrocks/2.8/meta.yaml +++ b/addons/kvrocks/2.8/meta.yaml @@ -30,6 +30,9 @@ allow_parameters: - name: "master.service.type" required: false description: "master service type config for values.yaml" +- name: "master.service.externalTrafficPolicy" + required: false + description: "master service externalTrafficPolicy config for values.yaml" - name: "master.nodeSelector" required: false description: "master nodeSelector config for values.yaml" @@ -39,9 +42,15 @@ allow_parameters: - name: "replica.service.type" required: false description: "replica service type config for values.yaml" +- name: "replica.service.externalTrafficPolicy" + required: false + description: "replica service externalTrafficPolicy config for values.yaml" - name: "sentinel.service.type" required: false description: "sentinel service type config for values.yaml" +- name: "sentinel.service.externalTrafficPolicy" + required: false + description: "sentinel service externalTrafficPolicy config for values.yaml" - name: "sentinel.extraEnvVars" required: false description: "sentinel extraEnvVars config for values.yaml" diff --git a/addons/minio/2023/meta.yaml b/addons/minio/2023/meta.yaml index 0d5cdf46..c3de64d3 100644 --- a/addons/minio/2023/meta.yaml +++ b/addons/minio/2023/meta.yaml @@ -27,6 +27,9 @@ allow_parameters: - name: "service.type" required: 
false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "auth.rootPassword" required: false description: "auth rootPassword config for values.yaml" diff --git a/addons/opensearch/2.10/meta.yaml b/addons/opensearch/2.10/meta.yaml index 1189d899..8990d291 100644 --- a/addons/opensearch/2.10/meta.yaml +++ b/addons/opensearch/2.10/meta.yaml @@ -54,10 +54,16 @@ allow_parameters: - name: "dashboards.networkPolicy.allowNamespaces" required: false description: "dashboards networkPolicy allowNamespaces config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "service.type" required: false description: "service type config for values.yaml" - name: "dashboards.service.type" required: false description: "dashboards service type config for values.yaml" +- name: "dashboards.service.externalTrafficPolicy" + required: false + description: "dashboards service externalTrafficPolicy config for values.yaml" archive: false diff --git a/addons/opensearch/3.0/meta.yaml b/addons/opensearch/3.0/meta.yaml index 22ff92e1..137cf543 100644 --- a/addons/opensearch/3.0/meta.yaml +++ b/addons/opensearch/3.0/meta.yaml @@ -60,7 +60,13 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "dashboards.service.type" required: false description: "dashboards service type config for values.yaml" +- name: "dashboards.service.externalTrafficPolicy" + required: false + description: "dashboards service externalTrafficPolicy config for values.yaml" archive: false diff --git a/addons/redis-cluster/7.0/meta.yaml b/addons/redis-cluster/7.0/meta.yaml index 074e22a1..4e19af1f 100644 --- 
a/addons/redis-cluster/7.0/meta.yaml +++ b/addons/redis-cluster/7.0/meta.yaml @@ -36,6 +36,9 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "metrics.enabled" required: false description: "metrics enabled or not config for values.yaml" diff --git a/addons/redis/7.0/meta.yaml b/addons/redis/7.0/meta.yaml index 659c86ba..203d94ea 100644 --- a/addons/redis/7.0/meta.yaml +++ b/addons/redis/7.0/meta.yaml @@ -27,6 +27,9 @@ allow_parameters: - name: "master.service.type" required: false description: "master service type config for values.yaml" +- name: "master.service.externalTrafficPolicy" + required: false + description: "master service externalTrafficPolicy config for values.yaml" - name: "master.nodeSelector" required: false description: "master nodeSelector config for values.yaml" @@ -39,9 +42,15 @@ allow_parameters: - name: "replica.service.type" required: false description: "replica service type config for values.yaml" +- name: "replica.service.externalTrafficPolicy" + required: false + description: "replica service externalTrafficPolicy config for values.yaml" - name: "sentinel.service.type" required: false description: "sentinel service type config for values.yaml" +- name: "sentinel.service.externalTrafficPolicy" + required: false + description: "sentinel service externalTrafficPolicy config for values.yaml" - name: "replica.disableCommands" required: false description: "replica.disableCommands config for values.yaml" diff --git a/addons/seaweedfs/3/meta.yaml b/addons/seaweedfs/3/meta.yaml index b4f278c0..34f2a0a2 100644 --- a/addons/seaweedfs/3/meta.yaml +++ b/addons/seaweedfs/3/meta.yaml @@ -21,6 +21,9 @@ allow_parameters: - name: "master.service.type" required: false description: "master service type config for values.yaml" +- name: 
"master.service.externalTrafficPolicy" + required: false + description: "master service externalTrafficPolicy config for values.yaml" - name: "master.defaultReplication" required: false description: "master defaultReplication config for values.yaml" @@ -30,7 +33,13 @@ allow_parameters: - name: "filer.service.type" required: false description: "filer service type config for values.yaml" +- name: "filer.service.externalTrafficPolicy" + required: false + description: "filer service externalTrafficPolicy config for values.yaml" - name: "volume.service.type" required: false description: "volume service type config for values.yaml" +- name: "volume.service.externalTrafficPolicy" + required: false + description: "volume service externalTrafficPolicy config for values.yaml" archive: false diff --git a/addons/spark/3.4/meta.yaml b/addons/spark/3.4/meta.yaml index 083883cf..b817e5c3 100644 --- a/addons/spark/3.4/meta.yaml +++ b/addons/spark/3.4/meta.yaml @@ -24,6 +24,9 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "master.configOptions" required: false description: "master configOptions config for values.yaml" diff --git a/addons/zookeeper/3.9/meta.yaml b/addons/zookeeper/3.9/meta.yaml index c8c555cb..75b2b590 100644 --- a/addons/zookeeper/3.9/meta.yaml +++ b/addons/zookeeper/3.9/meta.yaml @@ -24,6 +24,9 @@ allow_parameters: - name: "service.type" required: false description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" - name: "metrics.enabled" required: false description: "metrics enabled or not config for values.yaml" From 40d5f36c50505bf81a1b1854946e3e3be9b39744 Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 5 Aug 2025 15:57:22 +0800 Subject: [PATCH 
67/93] fix(victoriametrics): fixed plan typo (#115) --- .../1/plans/standard-16c32g500/values.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/addons/victoriametrics/1/plans/standard-16c32g500/values.yaml b/addons/victoriametrics/1/plans/standard-16c32g500/values.yaml index 7ebeff88..ccecde68 100644 --- a/addons/victoriametrics/1/plans/standard-16c32g500/values.yaml +++ b/addons/victoriametrics/1/plans/standard-16c32g500/values.yaml @@ -7,7 +7,7 @@ vmauth: cpu: 1000m memory: 1024Mi requests: - cpu: 1600m + cpu: 16000m memory: 32Gi replicaCount: 2 @@ -17,7 +17,7 @@ vmselect: cpu: 1000m memory: 1024Mi requests: - cpu: 1600m + cpu: 16000m memory: 32Gi replicaCount: 2 @@ -27,7 +27,7 @@ vminsert: cpu: 1000m memory: 1024Mi requests: - cpu: 1600m + cpu: 16000m memory: 32Gi replicaCount: 2 @@ -37,7 +37,7 @@ vmstorage: cpu: 1000m memory: 1024Mi requests: - cpu: 1600m + cpu: 16000m memory: 32Gi replicaCount: 3 persistence: From 18e17b58210d704596d07196b1155895af87777f Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 1 Sep 2025 16:42:44 +0800 Subject: [PATCH 68/93] chore(addons): plan instance-schema --- .../standard-16c48g2w/instance-schema.json | 21 ++ .../standard-1c2g2w/instance-schema.json | 21 ++ .../standard-24c64g7w/instance-schema.json | 21 ++ .../standard-2c4g2w/instance-schema.json | 21 ++ .../standard-4c16g2w/instance-schema.json | 21 ++ .../standard-4c8g2w/instance-schema.json | 21 ++ .../standard-8c32g2w/instance-schema.json | 21 ++ .../standard-16c48g2w/instance-schema.json | 36 +++ .../standard-1c2g2w/instance-schema.json | 36 +++ .../standard-24c64g7w/instance-schema.json | 36 +++ .../standard-2c4g2w/instance-schema.json | 36 +++ .../standard-4c16g2w/instance-schema.json | 36 +++ .../standard-4c8g2w/instance-schema.json | 36 +++ .../standard-8c32g2w/instance-schema.json | 36 +++ addons/apollo-bk/2.3/chart/apollo/.helmignore | 21 ++ addons/apollo-bk/2.3/chart/apollo/1.yaml | 260 +++++++++++++++++ 
addons/apollo-bk/2.3/chart/apollo/Chart.yaml | 25 ++ addons/apollo-bk/2.3/chart/apollo/README.md | 0 .../2.3/chart/apollo/templates/NOTES.txt | 0 .../2.3/chart/apollo/templates/_helpers.tpl | 133 +++++++++ .../apollo/templates/adminservice/NOTES.txt | 32 ++ .../templates/adminservice/deployment.yaml | 88 ++++++ .../templates/adminservice/ingress.yaml | 63 ++++ .../apollo/templates/adminservice/secret.yaml | 20 ++ .../templates/adminservice/service.yaml | 22 ++ .../apollo/templates/configservice/NOTES.txt | 32 ++ .../templates/configservice/deployment.yaml | 88 ++++++ .../templates/configservice/ingress.yaml | 63 ++++ .../templates/configservice/secret.yaml | 22 ++ .../templates/configservice/service.yaml | 22 ++ .../chart/apollo/templates/portal/NOTES.txt | 25 ++ .../apollo/templates/portal/deployment.yaml | 102 +++++++ .../apollo/templates/portal/ingress.yaml | 64 ++++ .../chart/apollo/templates/portal/secret.yaml | 36 +++ .../apollo/templates/portal/service.yaml | 23 ++ addons/apollo-bk/2.3/chart/apollo/values.yaml | 273 ++++++++++++++++++ addons/apollo-bk/2.3/meta.yaml | 27 ++ .../2.3/plans/standard-1c2g2w/bind.yaml | 43 +++ .../standard-1c2g2w/instance-schema.json} | 0 .../2.3/plans/standard-1c2g2w/meta.yaml | 6 + .../2.3/plans/standard-1c2g2w/values.yaml | 60 ++++ .../standard-16c64g1000/instance-schema.json} | 0 .../standard-2c4g20/instance-schema.json} | 0 .../instance-schema.json} | 0 .../standard-4c16g100/instance-schema.json} | 0 .../standard-8c32g500/instance-schema.json} | 0 .../plans/standard-10/instance-schema.json} | 0 .../standard-2c4g5w/instance-schema.json | 21 ++ .../standard-4c8g5w/instance-schema.json | 21 ++ .../standard-1000m/instance-schema.json} | 0 .../plans/standard-200m/instance-schema.json} | 0 .../plans/standard-500m/instance-schema.json} | 0 .../10/chart/grafana/instance-schema.json} | 0 .../standard-16c32g3w/instance-schema.json} | 0 .../standard-1c2g3w/instance-schema.json} | 0 .../standard-24c64g3w/instance-schema.json} | 0 
.../standard-2c4g3w/instance-schema.json} | 0 .../standard-4c8g3w/instance-schema.json} | 0 .../standard-8c16g3w/instance-schema.json} | 0 .../standard-16c32g1024/instance-schema.json} | 0 .../standard-1c2g64/instance-schema.json} | 0 .../standard-2c4g128/instance-schema.json} | 0 .../standard-4c8g256/instance-schema.json} | 0 .../standard-8c16g512/instance-schema.json} | 0 .../standard-16c32g1024/instance-schema.json} | 0 .../standard-1c2g64/instance-schema.json} | 0 .../standard-2c4g128/instance-schema.json} | 0 .../standard-4c8g256/instance-schema.json} | 0 .../standard-8c16g512/instance-schema.json} | 0 .../plans/standard-1c1g/instance-schema.json} | 0 .../plans/standard-4c4g/instance-schema.json} | 0 .../standard-v4s1024/instance-schema.json} | 0 .../standard-v4s128/instance-schema.json} | 0 .../standard-v4s2048/instance-schema.json} | 0 .../standard-v4s256/instance-schema.json} | 0 .../standard-v4s3096/instance-schema.json} | 0 .../standard-v4s32/instance-schema.json} | 0 .../standard-v4s512/instance-schema.json} | 0 .../standard-v4s64/instance-schema.json} | 0 .../standard-v6d4s1T/instance-schema.json} | 0 .../standard-v8d4s1T/instance-schema.json} | 0 .../standard-v8d4s2T/instance-schema.json} | 0 .../standard-v8d4s3T/instance-schema.json} | 0 .../standard-16c64g400/instance-schema.json} | 0 .../standard-1c2g10/instance-schema.json} | 0 .../standard-2c4g20/instance-schema.json} | 0 .../standard-2c8g50/instance-schema.json} | 0 .../standard-32c128g800/instance-schema.json} | 0 .../standard-4c16g100/instance-schema.json} | 0 .../standard-8c32g200/instance-schema.json} | 0 .../standard-16c64g400/instance-schema.json} | 0 .../standard-2c4g20/instance-schema.json} | 0 .../standard-2c8g50/instance-schema.json} | 0 .../standard-32c128g800/instance-schema.json} | 0 .../standard-4c16g100/instance-schema.json} | 0 .../standard-8c32g200/instance-schema.json} | 0 .../standard-2c4g32/instance-schema.json} | 0 .../standard-2c4g64/instance-schema.json} | 0 
.../standard-4c16g256/instance-schema.json} | 0 .../standard-4c16g512/instance-schema.json} | 0 .../standard-4c8g128/instance-schema.json} | 0 .../standard-8c32g1024/instance-schema.json} | 0 .../standard-8c32g2048/instance-schema.json} | 0 .../standard-8c32g768/instance-schema.json} | 0 .../standard-2c4g32/instance-schema.json} | 0 .../standard-2c4g64/instance-schema.json} | 0 .../standard-4c16g256/instance-schema.json} | 0 .../standard-4c16g512/instance-schema.json} | 0 .../standard-4c8g128/instance-schema.json} | 0 .../standard-8c32g1024/instance-schema.json} | 0 .../standard-8c32g2048/instance-schema.json} | 0 .../standard-8c32g768/instance-schema.json} | 0 .../standard-2c2g10/instance-schema.json} | 0 .../standard-4c8g100/instance-schema.json} | 0 .../standard-8c16g200/instance-schema.json} | 0 .../standard-16c64g400/instance-schema.json} | 0 .../standard-2c4g20/instance-schema.json} | 0 .../standard-2c8g50/instance-schema.json} | 0 .../standard-32c128g800/instance-schema.json} | 0 .../standard-32c64g4000/instance-schema.json} | 0 .../standard-4c16g100/instance-schema.json} | 0 .../standard-8c32g200/instance-schema.json} | 0 .../standard-16c64g400/instance-schema.json} | 0 .../standard-2c4g20/instance-schema.json} | 0 .../standard-2c8g50/instance-schema.json} | 0 .../standard-32c128g800/instance-schema.json} | 0 .../standard-32c64g4000/instance-schema.json} | 0 .../standard-4c16g100/instance-schema.json} | 0 .../standard-8c32g200/instance-schema.json} | 0 .../standard-16c32g500/instance-schema.json} | 0 .../standard-1c1g10/instance-schema.json} | 0 .../standard-2c4g50/instance-schema.json} | 0 .../standard-4c8g100/instance-schema.json} | 0 .../standard-8c16g200/instance-schema.json} | 0 .../standard-16c32g3w/instance-schema.json} | 0 .../standard-2c4g3w/instance-schema.json} | 0 .../standard-4c8g3w/instance-schema.json} | 0 .../standard-8c16g3w/instance-schema.json} | 0 .../standard-16c32g3w/instance-schema.json} | 0 .../standard-2c4g3w/instance-schema.json} | 
0 .../standard-4c8g3w/instance-schema.json} | 0 .../standard-8c16g3w/instance-schema.json} | 0 .../plans/standard-1024/instance-schema.json} | 0 .../plans/standard-128/instance-schema.json} | 0 .../standard-16384/instance-schema.json} | 0 .../plans/standard-2048/instance-schema.json} | 0 .../plans/standard-256/instance-schema.json} | 0 .../standard-32768/instance-schema.json} | 0 .../plans/standard-4096/instance-schema.json} | 0 .../plans/standard-512/instance-schema.json} | 0 .../standard-65536/instance-schema.json} | 0 .../plans/standard-8192/instance-schema.json} | 0 .../plans/standard-1024/instance-schema.json} | 0 .../plans/standard-128/instance-schema.json} | 0 .../standard-131072/instance-schema.json} | 0 .../standard-16384/instance-schema.json} | 0 .../plans/standard-2048/instance-schema.json} | 0 .../plans/standard-256/instance-schema.json} | 0 .../standard-32768/instance-schema.json} | 0 .../plans/standard-4096/instance-schema.json} | 0 .../plans/standard-512/instance-schema.json} | 0 .../standard-65536/instance-schema.json} | 0 .../plans/standard-8192/instance-schema.json} | 0 .../standard-v4s1024/instance-schema.json} | 0 .../standard-v4s128/instance-schema.json} | 0 .../standard-v4s2048/instance-schema.json} | 0 .../standard-v4s256/instance-schema.json} | 0 .../standard-v4s3072/instance-schema.json} | 0 .../standard-v4s32/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-v4s512/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-v4s64/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-16c32g5w/instance-schema.json | 36 +++ .../create-instance-schema.json | 12 - .../standard-16c64g5w/instance-schema.json | 36 +++ .../create-instance-schema.json | 12 - .../standard-1c2g2w/instance-schema.json | 36 +++ .../create-instance-schema.json | 12 - .../standard-24c48g5w/instance-schema.json | 36 +++ .../create-instance-schema.json | 12 - .../standard-2c4g2w/instance-schema.json | 36 
+++ .../create-instance-schema.json | 12 - .../standard-4c8g3w/instance-schema.json | 36 +++ .../create-instance-schema.json | 12 - .../standard-8c16g3w/instance-schema.json | 36 +++ .../create-instance-schema.json | 12 - .../standard-16c32g500/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-1c1g10/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-2c4g50/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-4c8g100/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-8c16g200/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-1c2g3w10/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-2c4g3w20/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-16c32g3w/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-1c2g3w/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-2c4g3w/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-2c4g5w/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-4c8g3w/instance-schema.json} | 0 .../create-instance-schema.json | 12 - .../standard-8c16g3w/instance-schema.json} | 0 213 files changed, 2243 insertions(+), 264 deletions(-) create mode 100644 addons/airflow/2/plans/standard-16c48g2w/instance-schema.json create mode 100644 addons/airflow/2/plans/standard-1c2g2w/instance-schema.json create mode 100644 addons/airflow/2/plans/standard-24c64g7w/instance-schema.json create mode 100644 addons/airflow/2/plans/standard-2c4g2w/instance-schema.json create mode 100644 addons/airflow/2/plans/standard-4c16g2w/instance-schema.json create mode 100644 addons/airflow/2/plans/standard-4c8g2w/instance-schema.json create mode 100644 addons/airflow/2/plans/standard-8c32g2w/instance-schema.json create mode 100644 addons/airflow/3/plans/standard-16c48g2w/instance-schema.json 
create mode 100644 addons/airflow/3/plans/standard-1c2g2w/instance-schema.json create mode 100644 addons/airflow/3/plans/standard-24c64g7w/instance-schema.json create mode 100644 addons/airflow/3/plans/standard-2c4g2w/instance-schema.json create mode 100644 addons/airflow/3/plans/standard-4c16g2w/instance-schema.json create mode 100644 addons/airflow/3/plans/standard-4c8g2w/instance-schema.json create mode 100644 addons/airflow/3/plans/standard-8c32g2w/instance-schema.json create mode 100644 addons/apollo-bk/2.3/chart/apollo/.helmignore create mode 100644 addons/apollo-bk/2.3/chart/apollo/1.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/Chart.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/README.md create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/NOTES.txt create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml create mode 100644 
addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml create mode 100644 addons/apollo-bk/2.3/chart/apollo/values.yaml create mode 100644 addons/apollo-bk/2.3/meta.yaml create mode 100644 addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml rename addons/{airflow/2/plans/standard-16c48g2w/create-instance-schema.json => apollo-bk/2.3/plans/standard-1c2g2w/instance-schema.json} (100%) create mode 100644 addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml create mode 100644 addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml rename addons/{airflow/2/plans/standard-1c2g2w/create-instance-schema.json => clickhouse/24/plans/standard-16c64g1000/instance-schema.json} (100%) rename addons/{airflow/2/plans/standard-24c64g7w/create-instance-schema.json => clickhouse/24/plans/standard-2c4g20/instance-schema.json} (100%) rename addons/{airflow/2/plans/standard-2c4g2w/create-instance-schema.json => clickhouse/24/plans/standard-32c64g12000/instance-schema.json} (100%) rename addons/{airflow/2/plans/standard-4c16g2w/create-instance-schema.json => clickhouse/24/plans/standard-4c16g100/instance-schema.json} (100%) rename addons/{airflow/2/plans/standard-4c8g2w/create-instance-schema.json => clickhouse/24/plans/standard-8c32g500/instance-schema.json} (100%) rename addons/{airflow/2/plans/standard-8c32g2w/create-instance-schema.json => cloudbeaver/23/plans/standard-10/instance-schema.json} (100%) create mode 100644 addons/flink/1.17/plans/standard-2c4g5w/instance-schema.json create mode 100644 addons/flink/1.17/plans/standard-4c8g5w/instance-schema.json rename addons/{airflow/3/plans/standard-16c48g2w/create-instance-schema.json => fluentbit/2/plans/standard-1000m/instance-schema.json} (100%) rename addons/{airflow/3/plans/standard-1c2g2w/create-instance-schema.json => 
fluentbit/2/plans/standard-200m/instance-schema.json} (100%) rename addons/{airflow/3/plans/standard-24c64g7w/create-instance-schema.json => fluentbit/2/plans/standard-500m/instance-schema.json} (100%) rename addons/{airflow/3/plans/standard-2c4g2w/create-instance-schema.json => grafana/10/chart/grafana/instance-schema.json} (100%) rename addons/{airflow/3/plans/standard-4c16g2w/create-instance-schema.json => kafka/3.6/plans/standard-16c32g3w/instance-schema.json} (100%) rename addons/{airflow/3/plans/standard-4c8g2w/create-instance-schema.json => kafka/3.6/plans/standard-1c2g3w/instance-schema.json} (100%) rename addons/{airflow/3/plans/standard-8c32g2w/create-instance-schema.json => kafka/3.6/plans/standard-24c64g3w/instance-schema.json} (100%) rename addons/{clickhouse/24/plans/standard-16c64g1000/create-instance-schema.json => kafka/3.6/plans/standard-2c4g3w/instance-schema.json} (100%) rename addons/{clickhouse/24/plans/standard-2c4g20/create-instance-schema.json => kafka/3.6/plans/standard-4c8g3w/instance-schema.json} (100%) rename addons/{clickhouse/24/plans/standard-32c64g12000/create-instance-schema.json => kafka/3.6/plans/standard-8c16g3w/instance-schema.json} (100%) rename addons/{clickhouse/24/plans/standard-4c16g100/create-instance-schema.json => kvrocks/2.10/plans/standard-16c32g1024/instance-schema.json} (100%) rename addons/{clickhouse/24/plans/standard-8c32g500/create-instance-schema.json => kvrocks/2.10/plans/standard-1c2g64/instance-schema.json} (100%) rename addons/{cloudbeaver/23/plans/standard-10/create-instance-schema.json => kvrocks/2.10/plans/standard-2c4g128/instance-schema.json} (100%) rename addons/{flink/1.17/plans/standard-2c4g5w/create-instance-schema.json => kvrocks/2.10/plans/standard-4c8g256/instance-schema.json} (100%) rename addons/{flink/1.17/plans/standard-4c8g5w/create-instance-schema.json => kvrocks/2.10/plans/standard-8c16g512/instance-schema.json} (100%) rename 
addons/{fluentbit/2/plans/standard-1000m/create-instance-schema.json => kvrocks/2.8/plans/standard-16c32g1024/instance-schema.json} (100%) rename addons/{fluentbit/2/plans/standard-200m/create-instance-schema.json => kvrocks/2.8/plans/standard-1c2g64/instance-schema.json} (100%) rename addons/{fluentbit/2/plans/standard-500m/create-instance-schema.json => kvrocks/2.8/plans/standard-2c4g128/instance-schema.json} (100%) rename addons/{grafana/10/chart/grafana/create-instance-schema.json => kvrocks/2.8/plans/standard-4c8g256/instance-schema.json} (100%) rename addons/{kafka/3.6/plans/standard-16c32g3w/create-instance-schema.json => kvrocks/2.8/plans/standard-8c16g512/instance-schema.json} (100%) rename addons/{kafka/3.6/plans/standard-1c2g3w/create-instance-schema.json => lakefs/1.52/plans/standard-1c1g/instance-schema.json} (100%) rename addons/{kafka/3.6/plans/standard-24c64g3w/create-instance-schema.json => lakefs/1.52/plans/standard-4c4g/instance-schema.json} (100%) rename addons/{kafka/3.6/plans/standard-2c4g3w/create-instance-schema.json => minio/2023/plans/standard-v4s1024/instance-schema.json} (100%) rename addons/{kafka/3.6/plans/standard-4c8g3w/create-instance-schema.json => minio/2023/plans/standard-v4s128/instance-schema.json} (100%) rename addons/{kafka/3.6/plans/standard-8c16g3w/create-instance-schema.json => minio/2023/plans/standard-v4s2048/instance-schema.json} (100%) rename addons/{kvrocks/2.10/plans/standard-16c32g1024/create-instance-schema.json => minio/2023/plans/standard-v4s256/instance-schema.json} (100%) rename addons/{kvrocks/2.10/plans/standard-1c2g64/create-instance-schema.json => minio/2023/plans/standard-v4s3096/instance-schema.json} (100%) rename addons/{kvrocks/2.10/plans/standard-2c4g128/create-instance-schema.json => minio/2023/plans/standard-v4s32/instance-schema.json} (100%) rename addons/{kvrocks/2.10/plans/standard-4c8g256/create-instance-schema.json => minio/2023/plans/standard-v4s512/instance-schema.json} (100%) rename 
addons/{kvrocks/2.10/plans/standard-8c16g512/create-instance-schema.json => minio/2023/plans/standard-v4s64/instance-schema.json} (100%) rename addons/{kvrocks/2.8/plans/standard-16c32g1024/create-instance-schema.json => minio/2023/plans/standard-v6d4s1T/instance-schema.json} (100%) rename addons/{kvrocks/2.8/plans/standard-1c2g64/create-instance-schema.json => minio/2023/plans/standard-v8d4s1T/instance-schema.json} (100%) rename addons/{kvrocks/2.8/plans/standard-2c4g128/create-instance-schema.json => minio/2023/plans/standard-v8d4s2T/instance-schema.json} (100%) rename addons/{kvrocks/2.8/plans/standard-4c8g256/create-instance-schema.json => minio/2023/plans/standard-v8d4s3T/instance-schema.json} (100%) rename addons/{kvrocks/2.8/plans/standard-8c16g512/create-instance-schema.json => mongodb/7.0/plans/standard-16c64g400/instance-schema.json} (100%) rename addons/{lakefs/1.52/plans/standard-1c1g/create-instance-schema.json => mongodb/7.0/plans/standard-1c2g10/instance-schema.json} (100%) rename addons/{lakefs/1.52/plans/standard-4c4g/create-instance-schema.json => mongodb/7.0/plans/standard-2c4g20/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s1024/create-instance-schema.json => mongodb/7.0/plans/standard-2c8g50/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s128/create-instance-schema.json => mongodb/7.0/plans/standard-32c128g800/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s2048/create-instance-schema.json => mongodb/7.0/plans/standard-4c16g100/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s256/create-instance-schema.json => mongodb/7.0/plans/standard-8c32g200/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s3096/create-instance-schema.json => mysql-cluster/8.0/plans/standard-16c64g400/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s32/create-instance-schema.json => 
mysql-cluster/8.0/plans/standard-2c4g20/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s512/create-instance-schema.json => mysql-cluster/8.0/plans/standard-2c8g50/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v4s64/create-instance-schema.json => mysql-cluster/8.0/plans/standard-32c128g800/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v6d4s1T/create-instance-schema.json => mysql-cluster/8.0/plans/standard-4c16g100/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v8d4s1T/create-instance-schema.json => mysql-cluster/8.0/plans/standard-8c32g200/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v8d4s2T/create-instance-schema.json => opensearch/2.10/plans/standard-2c4g32/instance-schema.json} (100%) rename addons/{minio/2023/plans/standard-v8d4s3T/create-instance-schema.json => opensearch/2.10/plans/standard-2c4g64/instance-schema.json} (100%) rename addons/{mongodb/7.0/plans/standard-16c64g400/create-instance-schema.json => opensearch/2.10/plans/standard-4c16g256/instance-schema.json} (100%) rename addons/{mongodb/7.0/plans/standard-1c2g10/create-instance-schema.json => opensearch/2.10/plans/standard-4c16g512/instance-schema.json} (100%) rename addons/{mongodb/7.0/plans/standard-2c4g20/create-instance-schema.json => opensearch/2.10/plans/standard-4c8g128/instance-schema.json} (100%) rename addons/{mongodb/7.0/plans/standard-2c8g50/create-instance-schema.json => opensearch/2.10/plans/standard-8c32g1024/instance-schema.json} (100%) rename addons/{mongodb/7.0/plans/standard-32c128g800/create-instance-schema.json => opensearch/2.10/plans/standard-8c32g2048/instance-schema.json} (100%) rename addons/{mongodb/7.0/plans/standard-4c16g100/create-instance-schema.json => opensearch/2.10/plans/standard-8c32g768/instance-schema.json} (100%) rename addons/{mongodb/7.0/plans/standard-8c32g200/create-instance-schema.json => 
opensearch/3.0/plans/standard-2c4g32/instance-schema.json} (100%) rename addons/{mysql-cluster/8.0/plans/standard-16c64g400/create-instance-schema.json => opensearch/3.0/plans/standard-2c4g64/instance-schema.json} (100%) rename addons/{mysql-cluster/8.0/plans/standard-2c4g20/create-instance-schema.json => opensearch/3.0/plans/standard-4c16g256/instance-schema.json} (100%) rename addons/{mysql-cluster/8.0/plans/standard-2c8g50/create-instance-schema.json => opensearch/3.0/plans/standard-4c16g512/instance-schema.json} (100%) rename addons/{mysql-cluster/8.0/plans/standard-32c128g800/create-instance-schema.json => opensearch/3.0/plans/standard-4c8g128/instance-schema.json} (100%) rename addons/{mysql-cluster/8.0/plans/standard-4c16g100/create-instance-schema.json => opensearch/3.0/plans/standard-8c32g1024/instance-schema.json} (100%) rename addons/{mysql-cluster/8.0/plans/standard-8c32g200/create-instance-schema.json => opensearch/3.0/plans/standard-8c32g2048/instance-schema.json} (100%) rename addons/opensearch/{2.10/plans/standard-2c4g32/create-instance-schema.json => 3.0/plans/standard-8c32g768/instance-schema.json} (100%) rename addons/{opensearch/2.10/plans/standard-2c4g64/create-instance-schema.json => pmm/2.41/plans/standard-2c2g10/instance-schema.json} (100%) rename addons/{opensearch/2.10/plans/standard-4c16g256/create-instance-schema.json => pmm/2.41/plans/standard-4c8g100/instance-schema.json} (100%) rename addons/{opensearch/2.10/plans/standard-4c16g512/create-instance-schema.json => pmm/2.41/plans/standard-8c16g200/instance-schema.json} (100%) rename addons/{opensearch/2.10/plans/standard-4c8g128/create-instance-schema.json => postgresql-cluster/15/plans/standard-16c64g400/instance-schema.json} (100%) rename addons/{opensearch/2.10/plans/standard-8c32g1024/create-instance-schema.json => postgresql-cluster/15/plans/standard-2c4g20/instance-schema.json} (100%) rename addons/{opensearch/2.10/plans/standard-8c32g2048/create-instance-schema.json => 
postgresql-cluster/15/plans/standard-2c8g50/instance-schema.json} (100%) rename addons/{opensearch/2.10/plans/standard-8c32g768/create-instance-schema.json => postgresql-cluster/15/plans/standard-32c128g800/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-2c4g32/create-instance-schema.json => postgresql-cluster/15/plans/standard-32c64g4000/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-2c4g64/create-instance-schema.json => postgresql-cluster/15/plans/standard-4c16g100/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-4c16g256/create-instance-schema.json => postgresql-cluster/15/plans/standard-8c32g200/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-4c16g512/create-instance-schema.json => postgresql-cluster/16/plans/standard-16c64g400/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-4c8g128/create-instance-schema.json => postgresql-cluster/16/plans/standard-2c4g20/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-8c32g1024/create-instance-schema.json => postgresql-cluster/16/plans/standard-2c8g50/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-8c32g2048/create-instance-schema.json => postgresql-cluster/16/plans/standard-32c128g800/instance-schema.json} (100%) rename addons/{opensearch/3.0/plans/standard-8c32g768/create-instance-schema.json => postgresql-cluster/16/plans/standard-32c64g4000/instance-schema.json} (100%) rename addons/{pmm/2.41/plans/standard-2c2g10/create-instance-schema.json => postgresql-cluster/16/plans/standard-4c16g100/instance-schema.json} (100%) rename addons/{pmm/2.41/plans/standard-4c8g100/create-instance-schema.json => postgresql-cluster/16/plans/standard-8c32g200/instance-schema.json} (100%) rename addons/{pmm/2.41/plans/standard-8c16g200/create-instance-schema.json => prometheus/2/plans/standard-16c32g500/instance-schema.json} (100%) rename 
addons/{postgresql-cluster/15/plans/standard-16c64g400/create-instance-schema.json => prometheus/2/plans/standard-1c1g10/instance-schema.json} (100%) rename addons/{postgresql-cluster/15/plans/standard-2c4g20/create-instance-schema.json => prometheus/2/plans/standard-2c4g50/instance-schema.json} (100%) rename addons/{postgresql-cluster/15/plans/standard-2c8g50/create-instance-schema.json => prometheus/2/plans/standard-4c8g100/instance-schema.json} (100%) rename addons/{postgresql-cluster/15/plans/standard-32c128g800/create-instance-schema.json => prometheus/2/plans/standard-8c16g200/instance-schema.json} (100%) rename addons/{postgresql-cluster/15/plans/standard-32c64g4000/create-instance-schema.json => rabbitmq/3.12/plans/standard-16c32g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/15/plans/standard-4c16g100/create-instance-schema.json => rabbitmq/3.12/plans/standard-2c4g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/15/plans/standard-8c32g200/create-instance-schema.json => rabbitmq/3.12/plans/standard-4c8g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/16/plans/standard-16c64g400/create-instance-schema.json => rabbitmq/3.12/plans/standard-8c16g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/16/plans/standard-2c4g20/create-instance-schema.json => rabbitmq/4.0/plans/standard-16c32g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/16/plans/standard-2c8g50/create-instance-schema.json => rabbitmq/4.0/plans/standard-2c4g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/16/plans/standard-32c128g800/create-instance-schema.json => rabbitmq/4.0/plans/standard-4c8g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/16/plans/standard-32c64g4000/create-instance-schema.json => rabbitmq/4.0/plans/standard-8c16g3w/instance-schema.json} (100%) rename addons/{postgresql-cluster/16/plans/standard-4c16g100/create-instance-schema.json => 
redis-cluster/7.0/plans/standard-1024/instance-schema.json} (100%) rename addons/{postgresql-cluster/16/plans/standard-8c32g200/create-instance-schema.json => redis-cluster/7.0/plans/standard-128/instance-schema.json} (100%) rename addons/{prometheus/2/plans/standard-16c32g500/create-instance-schema.json => redis-cluster/7.0/plans/standard-16384/instance-schema.json} (100%) rename addons/{prometheus/2/plans/standard-1c1g10/create-instance-schema.json => redis-cluster/7.0/plans/standard-2048/instance-schema.json} (100%) rename addons/{prometheus/2/plans/standard-2c4g50/create-instance-schema.json => redis-cluster/7.0/plans/standard-256/instance-schema.json} (100%) rename addons/{prometheus/2/plans/standard-4c8g100/create-instance-schema.json => redis-cluster/7.0/plans/standard-32768/instance-schema.json} (100%) rename addons/{prometheus/2/plans/standard-8c16g200/create-instance-schema.json => redis-cluster/7.0/plans/standard-4096/instance-schema.json} (100%) rename addons/{rabbitmq/3.12/plans/standard-16c32g3w/create-instance-schema.json => redis-cluster/7.0/plans/standard-512/instance-schema.json} (100%) rename addons/{rabbitmq/3.12/plans/standard-2c4g3w/create-instance-schema.json => redis-cluster/7.0/plans/standard-65536/instance-schema.json} (100%) rename addons/{rabbitmq/3.12/plans/standard-4c8g3w/create-instance-schema.json => redis-cluster/7.0/plans/standard-8192/instance-schema.json} (100%) rename addons/{rabbitmq/3.12/plans/standard-8c16g3w/create-instance-schema.json => redis/7.0/plans/standard-1024/instance-schema.json} (100%) rename addons/{rabbitmq/4.0/plans/standard-16c32g3w/create-instance-schema.json => redis/7.0/plans/standard-128/instance-schema.json} (100%) rename addons/{rabbitmq/4.0/plans/standard-2c4g3w/create-instance-schema.json => redis/7.0/plans/standard-131072/instance-schema.json} (100%) rename addons/{rabbitmq/4.0/plans/standard-4c8g3w/create-instance-schema.json => redis/7.0/plans/standard-16384/instance-schema.json} (100%) rename 
addons/{rabbitmq/4.0/plans/standard-8c16g3w/create-instance-schema.json => redis/7.0/plans/standard-2048/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-1024/create-instance-schema.json => redis/7.0/plans/standard-256/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-128/create-instance-schema.json => redis/7.0/plans/standard-32768/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-16384/create-instance-schema.json => redis/7.0/plans/standard-4096/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-2048/create-instance-schema.json => redis/7.0/plans/standard-512/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-256/create-instance-schema.json => redis/7.0/plans/standard-65536/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-32768/create-instance-schema.json => redis/7.0/plans/standard-8192/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-4096/create-instance-schema.json => seaweedfs/3/plans/standard-v4s1024/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-512/create-instance-schema.json => seaweedfs/3/plans/standard-v4s128/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-65536/create-instance-schema.json => seaweedfs/3/plans/standard-v4s2048/instance-schema.json} (100%) rename addons/{redis-cluster/7.0/plans/standard-8192/create-instance-schema.json => seaweedfs/3/plans/standard-v4s256/instance-schema.json} (100%) rename addons/{redis/7.0/plans/standard-1024/create-instance-schema.json => seaweedfs/3/plans/standard-v4s3072/instance-schema.json} (100%) rename addons/{redis/7.0/plans/standard-128/create-instance-schema.json => seaweedfs/3/plans/standard-v4s32/instance-schema.json} (100%) delete mode 100644 addons/seaweedfs/3/plans/standard-v4s512/create-instance-schema.json rename 
addons/{redis/7.0/plans/standard-131072/create-instance-schema.json => seaweedfs/3/plans/standard-v4s512/instance-schema.json} (100%) delete mode 100644 addons/seaweedfs/3/plans/standard-v4s64/create-instance-schema.json rename addons/{redis/7.0/plans/standard-16384/create-instance-schema.json => seaweedfs/3/plans/standard-v4s64/instance-schema.json} (100%) delete mode 100644 addons/spark/3.4/plans/standard-16c32g5w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-16c32g5w/instance-schema.json delete mode 100644 addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-16c64g5w/instance-schema.json delete mode 100644 addons/spark/3.4/plans/standard-1c2g2w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-1c2g2w/instance-schema.json delete mode 100644 addons/spark/3.4/plans/standard-24c48g5w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-24c48g5w/instance-schema.json delete mode 100644 addons/spark/3.4/plans/standard-2c4g2w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-2c4g2w/instance-schema.json delete mode 100644 addons/spark/3.4/plans/standard-4c8g3w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-4c8g3w/instance-schema.json delete mode 100644 addons/spark/3.4/plans/standard-8c16g3w/create-instance-schema.json create mode 100644 addons/spark/3.4/plans/standard-8c16g3w/instance-schema.json delete mode 100644 addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json rename addons/{redis/7.0/plans/standard-2048/create-instance-schema.json => victoriametrics/1/plans/standard-16c32g500/instance-schema.json} (100%) delete mode 100644 addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json rename addons/{redis/7.0/plans/standard-256/create-instance-schema.json => 
victoriametrics/1/plans/standard-1c1g10/instance-schema.json} (100%) delete mode 100644 addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json rename addons/{redis/7.0/plans/standard-32768/create-instance-schema.json => victoriametrics/1/plans/standard-2c4g50/instance-schema.json} (100%) delete mode 100644 addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json rename addons/{redis/7.0/plans/standard-4096/create-instance-schema.json => victoriametrics/1/plans/standard-4c8g100/instance-schema.json} (100%) delete mode 100644 addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json rename addons/{redis/7.0/plans/standard-512/create-instance-schema.json => victoriametrics/1/plans/standard-8c16g200/instance-schema.json} (100%) delete mode 100644 addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json rename addons/{redis/7.0/plans/standard-65536/create-instance-schema.json => yugabytedb/2024/plans/standard-1c2g3w10/instance-schema.json} (100%) delete mode 100644 addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json rename addons/{redis/7.0/plans/standard-8192/create-instance-schema.json => yugabytedb/2024/plans/standard-2c4g3w20/instance-schema.json} (100%) delete mode 100644 addons/zookeeper/3.9/plans/standard-16c32g3w/create-instance-schema.json rename addons/{seaweedfs/3/plans/standard-v4s1024/create-instance-schema.json => zookeeper/3.9/plans/standard-16c32g3w/instance-schema.json} (100%) delete mode 100644 addons/zookeeper/3.9/plans/standard-1c2g3w/create-instance-schema.json rename addons/{seaweedfs/3/plans/standard-v4s128/create-instance-schema.json => zookeeper/3.9/plans/standard-1c2g3w/instance-schema.json} (100%) delete mode 100644 addons/zookeeper/3.9/plans/standard-2c4g3w/create-instance-schema.json rename addons/{seaweedfs/3/plans/standard-v4s2048/create-instance-schema.json => zookeeper/3.9/plans/standard-2c4g3w/instance-schema.json} (100%) delete mode 
100644 addons/zookeeper/3.9/plans/standard-2c4g5w/create-instance-schema.json rename addons/{seaweedfs/3/plans/standard-v4s256/create-instance-schema.json => zookeeper/3.9/plans/standard-2c4g5w/instance-schema.json} (100%) delete mode 100644 addons/zookeeper/3.9/plans/standard-4c8g3w/create-instance-schema.json rename addons/{seaweedfs/3/plans/standard-v4s3072/create-instance-schema.json => zookeeper/3.9/plans/standard-4c8g3w/instance-schema.json} (100%) delete mode 100644 addons/zookeeper/3.9/plans/standard-8c16g3w/create-instance-schema.json rename addons/{seaweedfs/3/plans/standard-v4s32/create-instance-schema.json => zookeeper/3.9/plans/standard-8c16g3w/instance-schema.json} (100%) diff --git a/addons/airflow/2/plans/standard-16c48g2w/instance-schema.json b/addons/airflow/2/plans/standard-16c48g2w/instance-schema.json new file mode 100644 index 00000000..1b05f1ab --- /dev/null +++ b/addons/airflow/2/plans/standard-16c48g2w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|20(?:[0-3]\\d|4[0-8]))Gi|(?:1(?:.\\d)?|2(?:.0)?)Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/2/plans/standard-1c2g2w/instance-schema.json b/addons/airflow/2/plans/standard-1c2g2w/instance-schema.json new file mode 100644 index 00000000..42d75a46 --- /dev/null +++ b/addons/airflow/2/plans/standard-1c2g2w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|100)Gi$" + } + } + } + } + } + 
} +} \ No newline at end of file diff --git a/addons/airflow/2/plans/standard-24c64g7w/instance-schema.json b/addons/airflow/2/plans/standard-24c64g7w/instance-schema.json new file mode 100644 index 00000000..5d937c19 --- /dev/null +++ b/addons/airflow/2/plans/standard-24c64g7w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|2(?:[0-4]\\d{2}|5(?:[0-5]\\d|60)))Gi|(?:1(?:.\\d)?|2(?:.[0-5])?)Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/2/plans/standard-2c4g2w/instance-schema.json b/addons/airflow/2/plans/standard-2c4g2w/instance-schema.json new file mode 100644 index 00000000..5618a94d --- /dev/null +++ b/addons/airflow/2/plans/standard-2c4g2w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|1\\d\\d|200)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/2/plans/standard-4c16g2w/instance-schema.json b/addons/airflow/2/plans/standard-4c16g2w/instance-schema.json new file mode 100644 index 00000000..d266ccc9 --- /dev/null +++ b/addons/airflow/2/plans/standard-4c16g2w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d{0,2}|1000)Gi$" + } + } + } + } 
+ } + } +} \ No newline at end of file diff --git a/addons/airflow/2/plans/standard-4c8g2w/instance-schema.json b/addons/airflow/2/plans/standard-4c8g2w/instance-schema.json new file mode 100644 index 00000000..56dbd3df --- /dev/null +++ b/addons/airflow/2/plans/standard-4c8g2w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|[1-4]\\d\\d|500)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/2/plans/standard-8c32g2w/instance-schema.json b/addons/airflow/2/plans/standard-8c32g2w/instance-schema.json new file mode 100644 index 00000000..8aab5330 --- /dev/null +++ b/addons/airflow/2/plans/standard-8c32g2w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1[0-4]\\d{2}|15(?:0\\d|1\\d|2\\d|3[0-6]))Gi|1(?:.[0-5])?Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-16c48g2w/instance-schema.json b/addons/airflow/3/plans/standard-16c48g2w/instance-schema.json new file mode 100644 index 00000000..25dc381c --- /dev/null +++ b/addons/airflow/3/plans/standard-16c48g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "triggerer": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": 
"^(?:(?:[1-9]\\d{0,2}|1\\d{3}|20(?:[0-3]\\d|4[0-8]))Gi|(?:1(?:.\\d)?|2(?:.0)?)Ti)$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|20(?:[0-3]\\d|4[0-8]))Gi|(?:1(?:.\\d)?|2(?:.0)?)Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-1c2g2w/instance-schema.json b/addons/airflow/3/plans/standard-1c2g2w/instance-schema.json new file mode 100644 index 00000000..173fc6ad --- /dev/null +++ b/addons/airflow/3/plans/standard-1c2g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "triggerer": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|100)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|100)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-24c64g7w/instance-schema.json b/addons/airflow/3/plans/standard-24c64g7w/instance-schema.json new file mode 100644 index 00000000..3b085f82 --- /dev/null +++ b/addons/airflow/3/plans/standard-24c64g7w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "triggerer": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|2(?:[0-4]\\d{2}|5(?:[0-5]\\d|60)))Gi|(?:1(?:.\\d)?|2(?:.[0-5])?)Ti)$" + } + } + } + } + }, + 
"worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|2(?:[0-4]\\d{2}|5(?:[0-5]\\d|60)))Gi|(?:1(?:.\\d)?|2(?:.[0-5])?)Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-2c4g2w/instance-schema.json b/addons/airflow/3/plans/standard-2c4g2w/instance-schema.json new file mode 100644 index 00000000..76ce5e58 --- /dev/null +++ b/addons/airflow/3/plans/standard-2c4g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "triggerer": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|1\\d\\d|200)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|1\\d\\d|200)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-4c16g2w/instance-schema.json b/addons/airflow/3/plans/standard-4c16g2w/instance-schema.json new file mode 100644 index 00000000..ad23ff58 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c16g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "triggerer": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d{0,2}|1000)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + 
"description": "persistence size", + "pattern": "^(?:[1-9]\\d{0,2}|1000)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-4c8g2w/instance-schema.json b/addons/airflow/3/plans/standard-4c8g2w/instance-schema.json new file mode 100644 index 00000000..207fee23 --- /dev/null +++ b/addons/airflow/3/plans/standard-4c8g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "triggerer": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|[1-4]\\d\\d|500)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|[1-4]\\d\\d|500)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-8c32g2w/instance-schema.json b/addons/airflow/3/plans/standard-8c32g2w/instance-schema.json new file mode 100644 index 00000000..8c279154 --- /dev/null +++ b/addons/airflow/3/plans/standard-8c32g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "triggerer": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1[0-4]\\d{2}|15(?:0\\d|1\\d|2\\d|3[0-6]))Gi|1(?:.[0-5])?Ti)$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1[0-4]\\d{2}|15(?:0\\d|1\\d|2\\d|3[0-6]))Gi|1(?:.[0-5])?Ti)$" + } + } + } + } + } + } 
+} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/.helmignore b/addons/apollo-bk/2.3/chart/apollo/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/addons/apollo-bk/2.3/chart/apollo/1.yaml b/addons/apollo-bk/2.3/chart/apollo/1.yaml new file mode 100644 index 00000000..c6edc2b8 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/1.yaml @@ -0,0 +1,260 @@ +--- +# Source: apollo/templates/adminservice/secret.yaml +kind: Secret +apiVersion: v1 +metadata: + name: release-name-apollo-adminservice +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 + spring.datasource.username = 1 + spring.datasource.password = 1 +--- +# Source: apollo/templates/portal/secret.yaml +kind: Secret +apiVersion: v1 +metadata: + name: release-name-apollo-portal +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://:3306/ApolloPortalDB?characterEncoding=utf8 + spring.datasource.username = 1 + spring.datasource.password = 1 + apollo.portal.envs = dev + apollo-env.properties: | + dev = release-name-apollo-configservice:8080 +--- +# Source: apollo/templates/configservice/secret.yaml +kind: ConfigMap +apiVersion: v1 +metadata: + name: release-name-apollo-configservice +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 + spring.datasource.username = 1 + spring.datasource.password = 1 + apollo.config-service.url = 
http://release-name-apollo-configservice.default:8080 + apollo.admin-service.url = http://release-name-apollo-adminservice.default:8090 +--- +# Source: apollo/templates/adminservice/service.yaml +kind: Service +apiVersion: v1 +metadata: + name: release-name-apollo-adminservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 8090 + targetPort: 8090 + selector: + app: release-name-apollo-adminservice +--- +# Source: apollo/templates/configservice/service.yaml +kind: Service +apiVersion: v1 +metadata: + name: release-name-apollo-configservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + selector: + app: release-name-apollo-configservice +--- +# Source: apollo/templates/portal/service.yaml +kind: Service +apiVersion: v1 +metadata: + name: release-name-apollo-portal + labels: + app.kubernetes.io/version: "2.3.0" +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 8070 + targetPort: 8070 + selector: + app: release-name-apollo-portal + sessionAffinity: ClientIP +--- +# Source: apollo/templates/adminservice/deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + name: release-name-apollo-adminservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + replicas: 2 + selector: + matchLabels: + app: release-name-apollo-adminservice + template: + metadata: + labels: + app: release-name-apollo-adminservice + spec: + volumes: + - name: volume-configmap-release-name-apollo-adminservice + configMap: + name: release-name-apollo-adminservice + items: + - key: application-github.properties + path: application-github.properties + defaultMode: 420 + containers: + - name: apollo-adminservice + image: "drycc-addons/apollo-adminservice:2.3" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8090 + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: 
"github,kubernetes" + volumeMounts: + - name: volume-configmap-release-name-apollo-adminservice + mountPath: /apollo-adminservice/config/application-github.properties + subPath: application-github.properties + livenessProbe: + tcpSocket: + port: 8090 + initialDelaySeconds: 100 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 8090 + initialDelaySeconds: 30 + periodSeconds: 5 + resources: + {} +--- +# Source: apollo/templates/configservice/deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + name: release-name-apollo-configservice + labels: + app.kubernetes.io/version: "2.3.0" +spec: + replicas: 2 + selector: + matchLabels: + app: release-name-apollo-configservice + template: + metadata: + labels: + app: release-name-apollo-configservice + spec: + volumes: + - name: volume-configmap-release-name-apollo-configservice + configMap: + name: release-name-apollo-configservice + items: + - key: application-github.properties + path: application-github.properties + defaultMode: 420 + containers: + - name: apollo-configservice + image: "drycc-addons/apollo-configservice:2.3" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: "github,kubernetes" + volumeMounts: + - name: volume-configmap-release-name-apollo-configservice + mountPath: /apollo-configservice/config/application-github.properties + subPath: application-github.properties + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 100 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 5 + resources: + {} +--- +# Source: apollo/templates/portal/deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + name: release-name-apollo-portal + labels: + app.kubernetes.io/version: "2.3.0" +spec: + replicas: 1 + selector: + matchLabels: + app: release-name-apollo-portal + template: + metadata: + labels: + app: 
release-name-apollo-portal + spec: + volumes: + - name: secret-release-name-apollo-portal + Secret: + name: release-name-apollo-portal + items: + - key: application-github.properties + path: application-github.properties + - key: apollo-env.properties + path: apollo-env.properties + defaultMode: 420 + containers: + - name: apollo-portal + image: "drycc-addons/apollo-portal:2.3" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8070 + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: "github,auth" + volumeMounts: + - name: secret-release-name-apollo-portal + mountPath: /apollo-portal/config/application-github.properties + subPath: application-github.properties + - name: secret-release-name-apollo-portal + mountPath: /apollo-portal/config/apollo-env.properties + subPath: apollo-env.properties + livenessProbe: + tcpSocket: + port: 8070 + initialDelaySeconds: 100 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 8070 + initialDelaySeconds: 30 + periodSeconds: 5 + resources: + {} +--- +# Source: apollo/templates/portal/ingress.yaml +# diff --git a/addons/apollo-bk/2.3/chart/apollo/Chart.yaml b/addons/apollo-bk/2.3/chart/apollo/Chart.yaml new file mode 100644 index 00000000..07bdb346 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/Chart.yaml @@ -0,0 +1,25 @@ +# Copyright Drycc Community. +# SPDX-License-Identifier: APACHE-2.0 + +annotations: + category: config + licenses: Apache-2.0 +apiVersion: v2 +appVersion: "2.3.0" +dependencies: +- name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.3 +description: A Helm chart for Apollo Config Service and Apollo Admin Service +home: https://github.com/apolloconfig/apollo +icon: https://raw.githubusercontent.com/apolloconfig/apollo/master/apollo-portal/src/main/resources/static/img/logo-simple.png +keywords: +- apollo +- apolloconfig +maintainers: +- name: Drycc Community. 
+ url: https://github.com/drycc-addons/addons +name: apollo +sources: +- https://github.com/drycc-addons/addons +version: 0.1.0 diff --git a/addons/apollo-bk/2.3/chart/apollo/README.md b/addons/apollo-bk/2.3/chart/apollo/README.md new file mode 100644 index 00000000..e69de29b diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/NOTES.txt new file mode 100644 index 00000000..e69de29b diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl b/addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl new file mode 100644 index 00000000..6baef133 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl @@ -0,0 +1,133 @@ +{{/* vim: set filetype=mustache: */}} + + +{{/* +Full name for portal service +*/}} +{{- define "apollo.portal.fullName" -}} +{{- if .Values.portal.fullNameOverride -}} +{{- .Values.portal.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if contains .Values.portal.name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name .Values.portal.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "apollo.labels" -}} +{{- if .Chart.AppVersion -}} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- end -}} + +{{/* +Service name for portal +*/}} +{{- define "apollo.portal.serviceName" -}} +{{- if .Values.portal.service.fullNameOverride -}} +{{- .Values.portal.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{ include "apollo.portal.fullName" .}} +{{- end -}} +{{- end -}} + + +{{/* vim: set filetype=mustache: */}} + +{{/* +Service name for configdb +*/}} +{{- define "apollo.configdb.serviceName" -}} +{{- .Values.apolloService.configdb.host -}} +{{- end -}} + +{{/* +Service port for configdb +*/}} +{{- define "apollo.configdb.servicePort" -}} +{{- if 
.Values.apolloService.configdb.service.enabled -}} +{{- .Values.apolloService.configdb.service.port -}} +{{- else -}} +{{- .Values.apolloService.configdb.port -}} +{{- end -}} +{{- end -}} + +{{/* +Full name for config service +*/}} +{{- define "apollo.configService.fullName" -}} +{{- if .Values.apolloService.configService.fullNameOverride -}} +{{- .Values.apolloService.configService.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if contains .Values.apolloService.configService.name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name .Values.apolloService.configService.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Service name for config service +*/}} +{{- define "apollo.configService.serviceName" -}} +{{- if .Values.apolloService.configService.service.fullNameOverride -}} +{{- .Values.apolloService.configService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{ include "apollo.configService.fullName" .}} +{{- end -}} +{{- end -}} + +{{/* +Config service url to be accessed by apollo-client +*/}} +{{- define "apollo.configService.serviceUrl" -}} +{{- if .Values.apolloService.configService.config.configServiceUrlOverride -}} +{{ .Values.apolloService.configService.config.configServiceUrlOverride }} +{{- else -}} +http://{{ include "apollo.configService.serviceName" .}}.{{ .Release.Namespace }}:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} +{{- end -}} +{{- end -}} + +{{/* +Full name for admin service +*/}} +{{- define "apollo.adminService.fullName" -}} +{{- if .Values.apolloService.adminService.fullNameOverride -}} +{{- .Values.apolloService.adminService.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if contains .Values.apolloService.adminService.name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} 
+{{- printf "%s-%s" .Release.Name .Values.apolloService.adminService.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Service name for admin service +*/}} +{{- define "apollo.adminService.serviceName" -}} +{{- if .Values.apolloService.adminService.service.fullNameOverride -}} +{{- .Values.apolloService.adminService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{ include "apollo.adminService.fullName" .}} +{{- end -}} +{{- end -}} + +{{/* +Admin service url to be accessed by apollo-portal +*/}} +{{- define "apollo.adminService.serviceUrl" -}} +{{- if .Values.apolloService.configService.config.adminServiceUrlOverride -}} +{{ .Values.apolloService.configService.config.adminServiceUrlOverride -}} +{{- else -}} +http://{{ include "apollo.adminService.serviceName" .}}.{{ .Release.Namespace }}:{{ .Values.apolloService.adminService.service.port }}{{ .Values.apolloService.adminService.config.contextPath }} +{{- end -}} +{{- end -}} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt new file mode 100644 index 00000000..78ce9341 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt @@ -0,0 +1,32 @@ +Meta service url for current release: +{{- if contains "NodePort" .Values.apolloService.configService.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} + echo {{ include "apollo.configService.serviceUrl" .}} + +For local test use: + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 +{{- end }} + +{{- if .Values.apolloService.configService.ingress.enabled }} + +Ingress: +{{- range $host := .Values.apolloService.configService.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- end }} + +Urls registered to meta service: +Config service: {{ include "apollo.configService.serviceUrl" .}} +Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml new file mode 100644 index 00000000..d115a3c3 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.apolloService.enabled }}
+{{- $adminServiceFullName := include "apollo.adminService.fullName" . }}
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: {{ $adminServiceFullName }}
+  labels:
+    {{- include "apollo.labels" . | nindent 4 }}
+spec:
+  replicas: {{ .Values.apolloService.adminService.replicaCount }}
+  selector:
+    matchLabels:
+      app: {{ $adminServiceFullName }}
+  {{- with .Values.apolloService.adminService.strategy }}
+  strategy:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  template:
+    metadata:
+      labels:
+        app: {{ $adminServiceFullName }}
+      {{- with .Values.apolloService.adminService.annotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+      {{- with .Values.apolloService.adminService.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      volumes:
+        - name: volume-configmap-{{ $adminServiceFullName }}
+          secret:
+            secretName: {{ $adminServiceFullName }}
+            items:
+              - key: application-github.properties
+                path: application-github.properties
+            defaultMode: 420
+      containers:
+        - name: {{ .Values.apolloService.adminService.name }}
+          image: "{{ .Values.apolloService.adminService.image.repository }}:{{ .Values.apolloService.adminService.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.apolloService.adminService.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.apolloService.adminService.containerPort }}
+              protocol: TCP
+          env:
+            - name: SPRING_PROFILES_ACTIVE
+              value: {{ .Values.apolloService.adminService.config.profiles | quote }}
+            {{- range $key, $value := .Values.apolloService.adminService.env }}
+            - name: {{ $key }}
+              value: {{ $value }}
+            {{- end }}
+          volumeMounts:
+            - name: volume-configmap-{{ $adminServiceFullName }}
+              mountPath: /apollo-adminservice/config/application-github.properties
+              subPath: application-github.properties
+          livenessProbe:
+            tcpSocket:
+              port: {{
.Values.apolloService.adminService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.adminService.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.apolloService.adminService.liveness.periodSeconds }} + readinessProbe: + httpGet: + path: {{ .Values.apolloService.adminService.config.contextPath }}/health + port: {{ .Values.apolloService.adminService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.adminService.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.apolloService.adminService.readiness.periodSeconds }} + resources: + {{- toYaml .Values.apolloService.adminService.resources | nindent 12 }} + {{- with .Values.apolloService.adminService.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.apolloService.adminService.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.apolloService.adminService.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml new file mode 100644 index 00000000..1f5efcdf --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml @@ -0,0 +1,63 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- if .Values.apolloService.adminService.ingress.enabled -}} +{{- $fullName := include "apollo.adminService.fullName" . 
-}} +{{- $svcPort := .Values.apolloService.adminService.service.port -}} +{{- $apiVersion := "extensions/v1beta1" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1" }} +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1beta1" }} +{{- end }} +apiVersion: {{ $apiVersion }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} + {{- with .Values.apolloService.adminService.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.apolloService.adminService.ingress.ingressClassName }} + ingressClassName: {{ .Values.apolloService.adminService.ingress.ingressClassName }} +{{- end }} +{{- if .Values.apolloService.adminService.ingress.tls }} + tls: + {{- range .Values.apolloService.adminService.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.apolloService.adminService.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if eq $apiVersion "networking.k8s.io/v1" }} + pathType: Prefix + {{- end }} + backend: + {{- if eq $apiVersion "networking.k8s.io/v1" }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml new file mode 100644 index 00000000..d7f35e89 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.apolloService.enabled }}
+{{- $adminServiceFullName := include "apollo.adminService.fullName" . }}
+kind: Secret
+apiVersion: v1
+metadata:
+  name: {{ $adminServiceFullName }}
+stringData:
+  application-github.properties: |
+    spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }}
+    spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }}
+    spring.datasource.password = {{ required "configdb.password is required!" .Values.apolloService.configdb.password }}
+    {{- if .Values.apolloService.adminService.config.contextPath }}
+    server.servlet.context-path = {{ .Values.apolloService.adminService.config.contextPath }}
+    {{- end }}
+{{- end }}
diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml
new file mode 100644
index 00000000..ff44f6cc
--- /dev/null
+++ b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml
@@ -0,0 +1,22 @@
+{{- /*
+Copyright Drycc Community.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if .Values.apolloService.enabled }}
+kind: Service
+apiVersion: v1
+metadata:
+  name: {{ include "apollo.adminService.serviceName" . }}
+  labels:
+    {{- include "apollo.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.apolloService.adminService.service.type }}
+  ports:
+    - name: http
+      protocol: TCP
+      port: {{ .Values.apolloService.adminService.service.port }}
+      targetPort: {{ .Values.apolloService.adminService.service.targetPort }}
+  selector:
+    app: {{ include "apollo.adminService.fullName" .
}} +{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt new file mode 100644 index 00000000..78ce9341 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt @@ -0,0 +1,32 @@ +Meta service url for current release: +{{- if contains "NodePort" .Values.apolloService.configService.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} +{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} + echo {{ include "apollo.configService.serviceUrl" .}} + +For local test use: + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 +{{- end }} + +{{- if .Values.apolloService.configService.ingress.enabled }} + +Ingress: +{{- range $host := .Values.apolloService.configService.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- end }} + +Urls registered to meta service: +Config service: {{ include "apollo.configService.serviceUrl" .}} +Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml new file mode 100644 index 00000000..947e8eb9 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- $configServiceFullName := include "apollo.configService.fullName" . }} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ $configServiceFullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.apolloService.configService.replicaCount }} + selector: + matchLabels: + app: {{ $configServiceFullName }} + {{- with .Values.apolloService.configService.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ $configServiceFullName }} + {{- with .Values.apolloService.configService.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.apolloService.configService.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: volume-configmap-{{ $configServiceFullName }} + configMap: + name: {{ $configServiceFullName }} + items: + - key: application-github.properties + path: application-github.properties + defaultMode: 420 + containers: + - name: {{ .Values.apolloService.configService.name }} + image: "{{ .Values.apolloService.configService.image.repository }}:{{ .Values.apolloService.configService.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.apolloService.configService.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.apolloService.configService.containerPort }} + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: {{ .Values.apolloService.configService.config.profiles | quote }} + {{- range $key, $value := .Values.apolloService.configService.env }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + volumeMounts: + - name: volume-configmap-{{ $configServiceFullName }} + mountPath: /apollo-configservice/config/application-github.properties + subPath: application-github.properties + livenessProbe: + tcpSocket: + port: {{ .Values.apolloService.configService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.configService.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.apolloService.configService.liveness.periodSeconds }} + readinessProbe: + httpGet: + path: {{ .Values.apolloService.configService.config.contextPath }}/health + port: {{ .Values.apolloService.configService.containerPort }} + initialDelaySeconds: {{ .Values.apolloService.configService.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.apolloService.configService.readiness.periodSeconds }} + resources: + {{- toYaml .Values.apolloService.configService.resources | nindent 12 }} + {{- with .Values.apolloService.configService.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.apolloService.configService.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.apolloService.configService.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml new file mode 100644 index 00000000..36fc5421 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml @@ -0,0 +1,63 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- if .Values.apolloService.configService.ingress.enabled -}} +{{- $fullName := include "apollo.configService.fullName" . -}} +{{- $svcPort := .Values.apolloService.configService.service.port -}} +{{- $apiVersion := "extensions/v1beta1" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1" }} +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1beta1" }} +{{- end }} +apiVersion: {{ $apiVersion }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} + {{- with .Values.apolloService.configService.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.apolloService.configService.ingress.ingressClassName }} + ingressClassName: {{ .Values.apolloService.configService.ingress.ingressClassName }} +{{- end }} +{{- if .Values.apolloService.configService.ingress.tls }} + tls: + {{- range .Values.apolloService.configService.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.apolloService.configService.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if eq $apiVersion "networking.k8s.io/v1" }} + pathType: Prefix + {{- end }} + backend: + {{- if eq $apiVersion "networking.k8s.io/v1" }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml new file mode 100644 index 00000000..5224b51d --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml @@ -0,0 +1,22 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +{{- $configServiceFullName := include "apollo.configService.fullName" . }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ $configServiceFullName }} +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }} + spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }} + spring.datasource.password = {{ required "configdb.password is required!" 
.Values.apolloService.configdb.password }} + apollo.config-service.url = {{ include "apollo.configService.serviceUrl" .}} + apollo.admin-service.url = {{ include "apollo.adminService.serviceUrl" .}} + {{- if .Values.apolloService.configService.config.contextPath }} + server.servlet.context-path = {{ .Values.apolloService.configService.config.contextPath }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml new file mode 100644 index 00000000..9bcbb5e1 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml @@ -0,0 +1,22 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.apolloService.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "apollo.configService.serviceName" . }} + labels: + {{- include "apollo.labels" . | nindent 4 }} +spec: + type: {{ .Values.apolloService.configService.service.type }} + ports: + - name: http + protocol: TCP + port: {{ .Values.apolloService.configService.service.port }} + targetPort: {{ .Values.apolloService.configService.service.targetPort }} + selector: + app: {{ include "apollo.configService.fullName" . }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt new file mode 100644 index 00000000..d49cfe0e --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt @@ -0,0 +1,25 @@ +Portal url for current release: +{{- if contains "NodePort" .Values.portal.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.portal.fullName" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.portal.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.portal.fullName" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.portal.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.portal.service.port }} +{{- else if contains "ClusterIP" .Values.portal.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.portal.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8070 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8070:8070 +{{- end }} + +{{- if .Values.portal.ingress.enabled }} + +Ingress: +{{- range $host := .Values.portal.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.portal.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml new file mode 100644 index 00000000..7c86b3e7 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml @@ -0,0 +1,102 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.portal.enabled }} +{{- $portalFullName := include "apollo.portal.fullName" . }} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ $portalFullName }} + labels: + {{- include "apollo.labels" . 
| nindent 4 }} +spec: + replicas: {{ .Values.portal.replicaCount }} + selector: + matchLabels: + app: {{ $portalFullName }} + {{- with .Values.portal.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ $portalFullName }} + {{- with .Values.portal.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.portal.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: secret-{{ $portalFullName }} + Secret: + name: {{ $portalFullName }} + items: + - key: application-github.properties + path: application-github.properties + - key: apollo-env.properties + path: apollo-env.properties + {{- range $fileName, $content := .Values.portal.config.files }} + - key: {{ $fileName }} + path: {{ $fileName }} + {{- end }} + defaultMode: 420 + containers: + - name: {{ .Values.portal.name }} + image: "{{ .Values.portal.image.repository }}:{{ .Values.portal.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.portal.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.portal.containerPort }} + protocol: TCP + env: + - name: SPRING_PROFILES_ACTIVE + value: {{ .Values.portal.config.profiles | quote }} + {{- range $key, $value := .Values.portal.env }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + volumeMounts: + - name: secret-{{ $portalFullName }} + mountPath: /apollo-portal/config/application-github.properties + subPath: application-github.properties + - name: secret-{{ $portalFullName }} + mountPath: /apollo-portal/config/apollo-env.properties + subPath: apollo-env.properties + {{- range $fileName, $content := .Values.portal.config.files }} + - name: secret-{{ $portalFullName }} + mountPath: /apollo-portal/config/{{ $fileName }} + subPath: {{ $fileName }} + {{- end }} + livenessProbe: + tcpSocket: + port: {{ .Values.portal.containerPort }} + initialDelaySeconds: {{ 
.Values.portal.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.portal.liveness.periodSeconds }} + readinessProbe: + httpGet: + path: {{ .Values.portal.config.contextPath }}/health + port: {{ .Values.portal.containerPort }} + initialDelaySeconds: {{ .Values.portal.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.portal.readiness.periodSeconds }} + resources: + {{- toYaml .Values.portal.resources | nindent 12 }} + {{- with .Values.portal.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.portal.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.portal.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml new file mode 100644 index 00000000..b01dc05b --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml @@ -0,0 +1,64 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.portal.enabled }} +# +{{- if .Values.portal.ingress.enabled -}} +{{- $fullName := include "apollo.portal.fullName" . -}} +{{- $svcPort := .Values.portal.service.port -}} +{{- $apiVersion := "extensions/v1beta1" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1" }} +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} +{{- $apiVersion = "networking.k8s.io/v1beta1" }} +{{- end }} +apiVersion: {{ $apiVersion }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "apollo.labels" . | nindent 4 }} + {{- with .Values.portal.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: +{{- if .Values.portal.ingress.ingressClassName }} + ingressClassName: {{ .Values.portal.ingress.ingressClassName }} +{{- end }} +{{- if .Values.portal.ingress.tls }} + tls: + {{- range .Values.portal.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.portal.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if eq $apiVersion "networking.k8s.io/v1" }} + pathType: Prefix + {{- end }} + backend: + {{- if eq $apiVersion "networking.k8s.io/v1" }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml new file mode 100644 index 00000000..32e8cfbb --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml @@ -0,0 +1,36 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.portal.enabled }} +{{- $portalFullName := include "apollo.portal.fullName" . }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ $portalFullName }} +data: + application-github.properties: | + spring.datasource.url = jdbc:mysql://{{ .Values.portal.portaldb.host }}:{{ .Values.portal.portaldb.port }}/{{ .Values.portal.portaldb.dbName }}{{ if .Values.portal.portaldb.connectionStringProperties }}?{{ .Values.portal.portaldb.connectionStringProperties }}{{ end }} + spring.datasource.username = {{ required "portaldb.userName is required!" .Values.portal.portaldb.userName }} + spring.datasource.password = {{ required "portaldb.password is required!" 
.Values.portal.portaldb.password }} + {{- if .Values.portal.config.envs }} + apollo.portal.envs = {{ .Values.portal.config.envs }} + {{- end }} + {{- if .Values.portal.config.contextPath }} + server.servlet.context-path = {{ .Values.portal.config.contextPath }} + {{- end }} + apollo-env.properties: | + {{- if .Values.apolloService.enabled }} + {{ .Values.apolloService.meta }} = {{ include "apollo.configService.serviceName" . }}:{{ .Values.apolloService.configService.service.port }} + {{- end }} + {{- if .Values.portal.config.metaServers }} + {{- range $env, $address := .Values.portal.config.metaServers }} + {{ $env }}.meta = {{ $address }} + {{- end }} + {{- end }} +{{- range $fileName, $content := .Values.portal.config.files }} +{{ $fileName | indent 2 }}: | +{{ $content | indent 4 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml new file mode 100644 index 00000000..da8237d5 --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.portal.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "apollo.portal.serviceName" . }} + labels: + {{- include "apollo.labels" . | nindent 4 }} +spec: + type: {{ .Values.portal.service.type }} + ports: + - name: http + protocol: TCP + port: {{ .Values.portal.service.port }} + targetPort: {{ .Values.portal.service.targetPort }} + selector: + app: {{ include "apollo.portal.fullName" . }} + sessionAffinity: {{ .Values.portal.service.sessionAffinity }} +{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/values.yaml b/addons/apollo-bk/2.3/chart/apollo/values.yaml new file mode 100644 index 00000000..7a00ff0e --- /dev/null +++ b/addons/apollo-bk/2.3/chart/apollo/values.yaml @@ -0,0 +1,273 @@ +# Copyright Drycc Community. 
+# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +portal: + enabled: true + name: apollo-portal + fullNameOverride: "" + replicaCount: 1 
+ containerPort: 8070 + image: + registry: registry.drycc.cc + repository: drycc-addons/apollo-portal + tag: "2.3" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + service: + fullNameOverride: "" + port: 8070 + targetPort: 8070 + type: ClusterIP + sessionAffinity: ClientIP + ingress: + ingressClassName: null + enabled: false + annotations: {} + hosts: + - host: "" + paths: [] + tls: [] + liveness: + initialDelaySeconds: 100 + periodSeconds: 10 + readiness: + initialDelaySeconds: 30 + periodSeconds: 5 + # environment variables passed to the container, e.g. JAVA_OPTS + env: {} + strategy: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + annotations: {} + + config: + # spring profiles to activate + profiles: "github,auth" + # specify the env names, e.g. dev,pro + envs: "dev" + # specify the meta servers, e.g. + # dev: http://apollo-configservice-dev:8080 + # pro: http://apollo-configservice-pro:8080 + metaServers: "" + # specify the context path, e.g. /apollo + contextPath: "" + # extra config files for apollo-portal, e.g. 
application-ldap.yml + files: {} + + portaldb: + # apolloportaldb host + host: "" + port: 3306 + dbName: ApolloPortalDB + # apolloportaldb user name + userName: "1" + # apolloportaldb password + password: "1" + connectionStringProperties: characterEncoding=utf8 + + +## @section Apollo parameters + +apolloService: + enabled: true + meta: "dev" + configdb: + # apolloconfigdb host + host: "" + port: 3306 + dbName: ApolloConfigDB + # apolloconfigdb user name + userName: "1" + # apolloconfigdb password + password: "1" + connectionStringProperties: characterEncoding=utf8 + + configService: + name: apollo-configservice + fullNameOverride: "" + replicaCount: 2 + containerPort: 8080 + image: + repository: drycc-addons/apollo-configservice + tag: "2.3" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + service: + fullNameOverride: "" + port: 8080 + targetPort: 8080 + type: ClusterIP + ingress: + ingressClassName: null + enabled: false + annotations: { } + hosts: + - host: "" + paths: [ ] + tls: [ ] + liveness: + initialDelaySeconds: 100 + periodSeconds: 10 + readiness: + initialDelaySeconds: 30 + periodSeconds: 5 + config: + # spring profiles to activate + profiles: "github,kubernetes" + # override apollo.config-service.url: config service url to be accessed by apollo-client + configServiceUrlOverride: "" + # override apollo.admin-service.url: admin service url to be accessed by apollo-portal + adminServiceUrlOverride: "" + # specify the context path, e.g. 
/apollo + contextPath: "" + # environment variables passed to the container, e.g. JAVA_OPTS + env: {} + strategy: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + annotations: {} + + adminService: + name: apollo-adminservice + fullNameOverride: "" + replicaCount: 2 + containerPort: 8090 + image: + registry: registry.drycc.cc + repository: drycc-addons/apollo-adminservice + tag: "2.3" + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + service: + fullNameOverride: "" + port: 8090 + targetPort: 8090 + type: ClusterIP + ingress: + ingressClassName: null + enabled: false + annotations: { } + hosts: + - host: "" + paths: [ ] + tls: [ ] + liveness: + initialDelaySeconds: 100 + periodSeconds: 10 + readiness: + initialDelaySeconds: 30 + periodSeconds: 5 + config: + # spring profiles to activate + profiles: "github,kubernetes" + # specify the context path, e.g. /apollo + contextPath: "" + # environment variables passed to the container, e.g. JAVA_OPTS + env: {} + strategy: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + annotations: {} + diff --git a/addons/apollo-bk/2.3/meta.yaml b/addons/apollo-bk/2.3/meta.yaml new file mode 100644 index 00000000..edb96d83 --- /dev/null +++ b/addons/apollo-bk/2.3/meta.yaml @@ -0,0 +1,27 @@ +name: apollo +version: 2 +id: 06653a76-126d-4c9d-a929-e4841185ab68 +description: "apollo." 
+displayName: "apollo" +metadata: + displayName: "apollo" + provider: + name: drycc + supportURL: https://www.apolloconfig.com/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/apollo-adminservice +tags: apollo +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "portal.enabled" + required: false + description: "portal.enabled config for values.yaml" +- name: "portal.config" + required: false + description: "portal.config config for values.yaml" +- name: "portal.portaldb" + required: false + description: "portal.config config for values.yaml" +archive: false diff --git a/addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml b/addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml new file mode 100644 index 00000000..e37ddd93 --- /dev/null +++ b/addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml @@ -0,0 +1,43 @@ +credential: + {{ if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_WEB_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: WEB_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: WEB_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: USER + value: {{ .Values.auth.username }} + + {{- if (not .Values.auth.existingSecret) }} + - name: AIRFLOW_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-password }' + + - name: AIRFLOW_FERNET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .data.airflow-fernet-key }' + + - name: AIRFLOW_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .data.airflow-secret-key }' + {{- end }} diff --git a/addons/airflow/2/plans/standard-16c48g2w/create-instance-schema.json b/addons/apollo-bk/2.3/plans/standard-1c2g2w/instance-schema.json similarity index 100% rename from addons/airflow/2/plans/standard-16c48g2w/create-instance-schema.json rename to addons/apollo-bk/2.3/plans/standard-1c2g2w/instance-schema.json diff --git a/addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml b/addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml new file mode 100644 index 00000000..a10be35d --- /dev/null +++ b/addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c2g2w" +id: 75f949c8-8366-4805-aa8b-553de0ec6c24 +description: "airflow standard-1c2g2w plan which limit resources 2 workers per worker 1 core memory size 2Gi." +displayName: "standard-1c2g2w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml b/addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml new file mode 100644 index 00000000..caeaba82 --- /dev/null +++ b/addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml @@ -0,0 +1,60 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-airflow-standard-1c2g2w + +## @section Airflow web parameters + +web: + ## @param web.replicaCount Number of Airflow web replicas + ## + replicaCount: 1 + ## Airflow web resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param web.resources.limits The resources limits for the Airflow web containers + ## @param web.resources.requests The requested resources for the Airflow web containers + ## + resources: + limits: + cpu: 1000m + memory: 2048Mi + requests: + cpu: 500m + memory: 1024Mi + +## @section Airflow scheduler parameters + +scheduler: + ## @param scheduler.replicaCount Number of scheduler replicas + ## + replicaCount: 1 + ## Airflow scheduler resource requests 
and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param scheduler.resources.limits The resources limits for the Airflow scheduler containers + ## @param scheduler.resources.requests The requested resources for the Airflow scheduler containers + ## + resources: + limits: + cpu: 1000m + memory: 2048Mi + requests: + cpu: 500m + memory: 1024Mi + +## @section Airflow worker parameters + +worker: + ## @param worker.replicaCount Number of Airflow worker replicas + ## + replicaCount: 2 + ## Airflow worker resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param worker.resources.limits The resources limits for the Airflow worker containers + ## @param worker.resources.requests The requested resources for the Airflow worker containers + ## + resources: + limits: + cpu: 1000m + memory: 2048Mi + requests: + cpu: 500m + memory: 1024Mi diff --git a/addons/airflow/2/plans/standard-1c2g2w/create-instance-schema.json b/addons/clickhouse/24/plans/standard-16c64g1000/instance-schema.json similarity index 100% rename from addons/airflow/2/plans/standard-1c2g2w/create-instance-schema.json rename to addons/clickhouse/24/plans/standard-16c64g1000/instance-schema.json diff --git a/addons/airflow/2/plans/standard-24c64g7w/create-instance-schema.json b/addons/clickhouse/24/plans/standard-2c4g20/instance-schema.json similarity index 100% rename from addons/airflow/2/plans/standard-24c64g7w/create-instance-schema.json rename to addons/clickhouse/24/plans/standard-2c4g20/instance-schema.json diff --git a/addons/airflow/2/plans/standard-2c4g2w/create-instance-schema.json b/addons/clickhouse/24/plans/standard-32c64g12000/instance-schema.json similarity index 100% rename from addons/airflow/2/plans/standard-2c4g2w/create-instance-schema.json rename to addons/clickhouse/24/plans/standard-32c64g12000/instance-schema.json diff --git a/addons/airflow/2/plans/standard-4c16g2w/create-instance-schema.json 
b/addons/clickhouse/24/plans/standard-4c16g100/instance-schema.json similarity index 100% rename from addons/airflow/2/plans/standard-4c16g2w/create-instance-schema.json rename to addons/clickhouse/24/plans/standard-4c16g100/instance-schema.json diff --git a/addons/airflow/2/plans/standard-4c8g2w/create-instance-schema.json b/addons/clickhouse/24/plans/standard-8c32g500/instance-schema.json similarity index 100% rename from addons/airflow/2/plans/standard-4c8g2w/create-instance-schema.json rename to addons/clickhouse/24/plans/standard-8c32g500/instance-schema.json diff --git a/addons/airflow/2/plans/standard-8c32g2w/create-instance-schema.json b/addons/cloudbeaver/23/plans/standard-10/instance-schema.json similarity index 100% rename from addons/airflow/2/plans/standard-8c32g2w/create-instance-schema.json rename to addons/cloudbeaver/23/plans/standard-10/instance-schema.json diff --git a/addons/flink/1.17/plans/standard-2c4g5w/instance-schema.json b/addons/flink/1.17/plans/standard-2c4g5w/instance-schema.json new file mode 100644 index 00000000..c4b29c36 --- /dev/null +++ b/addons/flink/1.17/plans/standard-2c4g5w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "taskmanager": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|[1-4]\\d{2}|500)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/flink/1.17/plans/standard-4c8g5w/instance-schema.json b/addons/flink/1.17/plans/standard-4c8g5w/instance-schema.json new file mode 100644 index 00000000..4a2dbf10 --- /dev/null +++ b/addons/flink/1.17/plans/standard-4c8g5w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "taskmanager": { + "type": "object", + "properties": { + "persistence": { + "type": 
"object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:1Ti|(?:[1-9]\\d{0,2}|100[0-9]|101\\d|102[0-4])Gi)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/airflow/3/plans/standard-16c48g2w/create-instance-schema.json b/addons/fluentbit/2/plans/standard-1000m/instance-schema.json similarity index 100% rename from addons/airflow/3/plans/standard-16c48g2w/create-instance-schema.json rename to addons/fluentbit/2/plans/standard-1000m/instance-schema.json diff --git a/addons/airflow/3/plans/standard-1c2g2w/create-instance-schema.json b/addons/fluentbit/2/plans/standard-200m/instance-schema.json similarity index 100% rename from addons/airflow/3/plans/standard-1c2g2w/create-instance-schema.json rename to addons/fluentbit/2/plans/standard-200m/instance-schema.json diff --git a/addons/airflow/3/plans/standard-24c64g7w/create-instance-schema.json b/addons/fluentbit/2/plans/standard-500m/instance-schema.json similarity index 100% rename from addons/airflow/3/plans/standard-24c64g7w/create-instance-schema.json rename to addons/fluentbit/2/plans/standard-500m/instance-schema.json diff --git a/addons/airflow/3/plans/standard-2c4g2w/create-instance-schema.json b/addons/grafana/10/chart/grafana/instance-schema.json similarity index 100% rename from addons/airflow/3/plans/standard-2c4g2w/create-instance-schema.json rename to addons/grafana/10/chart/grafana/instance-schema.json diff --git a/addons/airflow/3/plans/standard-4c16g2w/create-instance-schema.json b/addons/kafka/3.6/plans/standard-16c32g3w/instance-schema.json similarity index 100% rename from addons/airflow/3/plans/standard-4c16g2w/create-instance-schema.json rename to addons/kafka/3.6/plans/standard-16c32g3w/instance-schema.json diff --git a/addons/airflow/3/plans/standard-4c8g2w/create-instance-schema.json b/addons/kafka/3.6/plans/standard-1c2g3w/instance-schema.json similarity index 100% rename from 
addons/airflow/3/plans/standard-4c8g2w/create-instance-schema.json rename to addons/kafka/3.6/plans/standard-1c2g3w/instance-schema.json diff --git a/addons/airflow/3/plans/standard-8c32g2w/create-instance-schema.json b/addons/kafka/3.6/plans/standard-24c64g3w/instance-schema.json similarity index 100% rename from addons/airflow/3/plans/standard-8c32g2w/create-instance-schema.json rename to addons/kafka/3.6/plans/standard-24c64g3w/instance-schema.json diff --git a/addons/clickhouse/24/plans/standard-16c64g1000/create-instance-schema.json b/addons/kafka/3.6/plans/standard-2c4g3w/instance-schema.json similarity index 100% rename from addons/clickhouse/24/plans/standard-16c64g1000/create-instance-schema.json rename to addons/kafka/3.6/plans/standard-2c4g3w/instance-schema.json diff --git a/addons/clickhouse/24/plans/standard-2c4g20/create-instance-schema.json b/addons/kafka/3.6/plans/standard-4c8g3w/instance-schema.json similarity index 100% rename from addons/clickhouse/24/plans/standard-2c4g20/create-instance-schema.json rename to addons/kafka/3.6/plans/standard-4c8g3w/instance-schema.json diff --git a/addons/clickhouse/24/plans/standard-32c64g12000/create-instance-schema.json b/addons/kafka/3.6/plans/standard-8c16g3w/instance-schema.json similarity index 100% rename from addons/clickhouse/24/plans/standard-32c64g12000/create-instance-schema.json rename to addons/kafka/3.6/plans/standard-8c16g3w/instance-schema.json diff --git a/addons/clickhouse/24/plans/standard-4c16g100/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-16c32g1024/instance-schema.json similarity index 100% rename from addons/clickhouse/24/plans/standard-4c16g100/create-instance-schema.json rename to addons/kvrocks/2.10/plans/standard-16c32g1024/instance-schema.json diff --git a/addons/clickhouse/24/plans/standard-8c32g500/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-1c2g64/instance-schema.json similarity index 100% rename from 
addons/clickhouse/24/plans/standard-8c32g500/create-instance-schema.json rename to addons/kvrocks/2.10/plans/standard-1c2g64/instance-schema.json diff --git a/addons/cloudbeaver/23/plans/standard-10/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-2c4g128/instance-schema.json similarity index 100% rename from addons/cloudbeaver/23/plans/standard-10/create-instance-schema.json rename to addons/kvrocks/2.10/plans/standard-2c4g128/instance-schema.json diff --git a/addons/flink/1.17/plans/standard-2c4g5w/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-4c8g256/instance-schema.json similarity index 100% rename from addons/flink/1.17/plans/standard-2c4g5w/create-instance-schema.json rename to addons/kvrocks/2.10/plans/standard-4c8g256/instance-schema.json diff --git a/addons/flink/1.17/plans/standard-4c8g5w/create-instance-schema.json b/addons/kvrocks/2.10/plans/standard-8c16g512/instance-schema.json similarity index 100% rename from addons/flink/1.17/plans/standard-4c8g5w/create-instance-schema.json rename to addons/kvrocks/2.10/plans/standard-8c16g512/instance-schema.json diff --git a/addons/fluentbit/2/plans/standard-1000m/create-instance-schema.json b/addons/kvrocks/2.8/plans/standard-16c32g1024/instance-schema.json similarity index 100% rename from addons/fluentbit/2/plans/standard-1000m/create-instance-schema.json rename to addons/kvrocks/2.8/plans/standard-16c32g1024/instance-schema.json diff --git a/addons/fluentbit/2/plans/standard-200m/create-instance-schema.json b/addons/kvrocks/2.8/plans/standard-1c2g64/instance-schema.json similarity index 100% rename from addons/fluentbit/2/plans/standard-200m/create-instance-schema.json rename to addons/kvrocks/2.8/plans/standard-1c2g64/instance-schema.json diff --git a/addons/fluentbit/2/plans/standard-500m/create-instance-schema.json b/addons/kvrocks/2.8/plans/standard-2c4g128/instance-schema.json similarity index 100% rename from 
addons/fluentbit/2/plans/standard-500m/create-instance-schema.json rename to addons/kvrocks/2.8/plans/standard-2c4g128/instance-schema.json diff --git a/addons/grafana/10/chart/grafana/create-instance-schema.json b/addons/kvrocks/2.8/plans/standard-4c8g256/instance-schema.json similarity index 100% rename from addons/grafana/10/chart/grafana/create-instance-schema.json rename to addons/kvrocks/2.8/plans/standard-4c8g256/instance-schema.json diff --git a/addons/kafka/3.6/plans/standard-16c32g3w/create-instance-schema.json b/addons/kvrocks/2.8/plans/standard-8c16g512/instance-schema.json similarity index 100% rename from addons/kafka/3.6/plans/standard-16c32g3w/create-instance-schema.json rename to addons/kvrocks/2.8/plans/standard-8c16g512/instance-schema.json diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/create-instance-schema.json b/addons/lakefs/1.52/plans/standard-1c1g/instance-schema.json similarity index 100% rename from addons/kafka/3.6/plans/standard-1c2g3w/create-instance-schema.json rename to addons/lakefs/1.52/plans/standard-1c1g/instance-schema.json diff --git a/addons/kafka/3.6/plans/standard-24c64g3w/create-instance-schema.json b/addons/lakefs/1.52/plans/standard-4c4g/instance-schema.json similarity index 100% rename from addons/kafka/3.6/plans/standard-24c64g3w/create-instance-schema.json rename to addons/lakefs/1.52/plans/standard-4c4g/instance-schema.json diff --git a/addons/kafka/3.6/plans/standard-2c4g3w/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s1024/instance-schema.json similarity index 100% rename from addons/kafka/3.6/plans/standard-2c4g3w/create-instance-schema.json rename to addons/minio/2023/plans/standard-v4s1024/instance-schema.json diff --git a/addons/kafka/3.6/plans/standard-4c8g3w/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s128/instance-schema.json similarity index 100% rename from addons/kafka/3.6/plans/standard-4c8g3w/create-instance-schema.json rename to 
addons/minio/2023/plans/standard-v4s128/instance-schema.json diff --git a/addons/kafka/3.6/plans/standard-8c16g3w/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s2048/instance-schema.json similarity index 100% rename from addons/kafka/3.6/plans/standard-8c16g3w/create-instance-schema.json rename to addons/minio/2023/plans/standard-v4s2048/instance-schema.json diff --git a/addons/kvrocks/2.10/plans/standard-16c32g1024/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s256/instance-schema.json similarity index 100% rename from addons/kvrocks/2.10/plans/standard-16c32g1024/create-instance-schema.json rename to addons/minio/2023/plans/standard-v4s256/instance-schema.json diff --git a/addons/kvrocks/2.10/plans/standard-1c2g64/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s3096/instance-schema.json similarity index 100% rename from addons/kvrocks/2.10/plans/standard-1c2g64/create-instance-schema.json rename to addons/minio/2023/plans/standard-v4s3096/instance-schema.json diff --git a/addons/kvrocks/2.10/plans/standard-2c4g128/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s32/instance-schema.json similarity index 100% rename from addons/kvrocks/2.10/plans/standard-2c4g128/create-instance-schema.json rename to addons/minio/2023/plans/standard-v4s32/instance-schema.json diff --git a/addons/kvrocks/2.10/plans/standard-4c8g256/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s512/instance-schema.json similarity index 100% rename from addons/kvrocks/2.10/plans/standard-4c8g256/create-instance-schema.json rename to addons/minio/2023/plans/standard-v4s512/instance-schema.json diff --git a/addons/kvrocks/2.10/plans/standard-8c16g512/create-instance-schema.json b/addons/minio/2023/plans/standard-v4s64/instance-schema.json similarity index 100% rename from addons/kvrocks/2.10/plans/standard-8c16g512/create-instance-schema.json rename to addons/minio/2023/plans/standard-v4s64/instance-schema.json 
diff --git a/addons/kvrocks/2.8/plans/standard-16c32g1024/create-instance-schema.json b/addons/minio/2023/plans/standard-v6d4s1T/instance-schema.json similarity index 100% rename from addons/kvrocks/2.8/plans/standard-16c32g1024/create-instance-schema.json rename to addons/minio/2023/plans/standard-v6d4s1T/instance-schema.json diff --git a/addons/kvrocks/2.8/plans/standard-1c2g64/create-instance-schema.json b/addons/minio/2023/plans/standard-v8d4s1T/instance-schema.json similarity index 100% rename from addons/kvrocks/2.8/plans/standard-1c2g64/create-instance-schema.json rename to addons/minio/2023/plans/standard-v8d4s1T/instance-schema.json diff --git a/addons/kvrocks/2.8/plans/standard-2c4g128/create-instance-schema.json b/addons/minio/2023/plans/standard-v8d4s2T/instance-schema.json similarity index 100% rename from addons/kvrocks/2.8/plans/standard-2c4g128/create-instance-schema.json rename to addons/minio/2023/plans/standard-v8d4s2T/instance-schema.json diff --git a/addons/kvrocks/2.8/plans/standard-4c8g256/create-instance-schema.json b/addons/minio/2023/plans/standard-v8d4s3T/instance-schema.json similarity index 100% rename from addons/kvrocks/2.8/plans/standard-4c8g256/create-instance-schema.json rename to addons/minio/2023/plans/standard-v8d4s3T/instance-schema.json diff --git a/addons/kvrocks/2.8/plans/standard-8c16g512/create-instance-schema.json b/addons/mongodb/7.0/plans/standard-16c64g400/instance-schema.json similarity index 100% rename from addons/kvrocks/2.8/plans/standard-8c16g512/create-instance-schema.json rename to addons/mongodb/7.0/plans/standard-16c64g400/instance-schema.json diff --git a/addons/lakefs/1.52/plans/standard-1c1g/create-instance-schema.json b/addons/mongodb/7.0/plans/standard-1c2g10/instance-schema.json similarity index 100% rename from addons/lakefs/1.52/plans/standard-1c1g/create-instance-schema.json rename to addons/mongodb/7.0/plans/standard-1c2g10/instance-schema.json diff --git 
a/addons/lakefs/1.52/plans/standard-4c4g/create-instance-schema.json b/addons/mongodb/7.0/plans/standard-2c4g20/instance-schema.json similarity index 100% rename from addons/lakefs/1.52/plans/standard-4c4g/create-instance-schema.json rename to addons/mongodb/7.0/plans/standard-2c4g20/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v4s1024/create-instance-schema.json b/addons/mongodb/7.0/plans/standard-2c8g50/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s1024/create-instance-schema.json rename to addons/mongodb/7.0/plans/standard-2c8g50/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v4s128/create-instance-schema.json b/addons/mongodb/7.0/plans/standard-32c128g800/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s128/create-instance-schema.json rename to addons/mongodb/7.0/plans/standard-32c128g800/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v4s2048/create-instance-schema.json b/addons/mongodb/7.0/plans/standard-4c16g100/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s2048/create-instance-schema.json rename to addons/mongodb/7.0/plans/standard-4c16g100/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v4s256/create-instance-schema.json b/addons/mongodb/7.0/plans/standard-8c32g200/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s256/create-instance-schema.json rename to addons/mongodb/7.0/plans/standard-8c32g200/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v4s3096/create-instance-schema.json b/addons/mysql-cluster/8.0/plans/standard-16c64g400/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s3096/create-instance-schema.json rename to addons/mysql-cluster/8.0/plans/standard-16c64g400/instance-schema.json diff --git 
a/addons/minio/2023/plans/standard-v4s32/create-instance-schema.json b/addons/mysql-cluster/8.0/plans/standard-2c4g20/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s32/create-instance-schema.json rename to addons/mysql-cluster/8.0/plans/standard-2c4g20/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v4s512/create-instance-schema.json b/addons/mysql-cluster/8.0/plans/standard-2c8g50/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s512/create-instance-schema.json rename to addons/mysql-cluster/8.0/plans/standard-2c8g50/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v4s64/create-instance-schema.json b/addons/mysql-cluster/8.0/plans/standard-32c128g800/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v4s64/create-instance-schema.json rename to addons/mysql-cluster/8.0/plans/standard-32c128g800/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v6d4s1T/create-instance-schema.json b/addons/mysql-cluster/8.0/plans/standard-4c16g100/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v6d4s1T/create-instance-schema.json rename to addons/mysql-cluster/8.0/plans/standard-4c16g100/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v8d4s1T/create-instance-schema.json b/addons/mysql-cluster/8.0/plans/standard-8c32g200/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v8d4s1T/create-instance-schema.json rename to addons/mysql-cluster/8.0/plans/standard-8c32g200/instance-schema.json diff --git a/addons/minio/2023/plans/standard-v8d4s2T/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-2c4g32/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v8d4s2T/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-2c4g32/instance-schema.json diff --git 
a/addons/minio/2023/plans/standard-v8d4s3T/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-2c4g64/instance-schema.json similarity index 100% rename from addons/minio/2023/plans/standard-v8d4s3T/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-2c4g64/instance-schema.json diff --git a/addons/mongodb/7.0/plans/standard-16c64g400/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-4c16g256/instance-schema.json similarity index 100% rename from addons/mongodb/7.0/plans/standard-16c64g400/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-4c16g256/instance-schema.json diff --git a/addons/mongodb/7.0/plans/standard-1c2g10/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-4c16g512/instance-schema.json similarity index 100% rename from addons/mongodb/7.0/plans/standard-1c2g10/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-4c16g512/instance-schema.json diff --git a/addons/mongodb/7.0/plans/standard-2c4g20/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-4c8g128/instance-schema.json similarity index 100% rename from addons/mongodb/7.0/plans/standard-2c4g20/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-4c8g128/instance-schema.json diff --git a/addons/mongodb/7.0/plans/standard-2c8g50/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-8c32g1024/instance-schema.json similarity index 100% rename from addons/mongodb/7.0/plans/standard-2c8g50/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-8c32g1024/instance-schema.json diff --git a/addons/mongodb/7.0/plans/standard-32c128g800/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-8c32g2048/instance-schema.json similarity index 100% rename from addons/mongodb/7.0/plans/standard-32c128g800/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-8c32g2048/instance-schema.json 
diff --git a/addons/mongodb/7.0/plans/standard-4c16g100/create-instance-schema.json b/addons/opensearch/2.10/plans/standard-8c32g768/instance-schema.json similarity index 100% rename from addons/mongodb/7.0/plans/standard-4c16g100/create-instance-schema.json rename to addons/opensearch/2.10/plans/standard-8c32g768/instance-schema.json diff --git a/addons/mongodb/7.0/plans/standard-8c32g200/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-2c4g32/instance-schema.json similarity index 100% rename from addons/mongodb/7.0/plans/standard-8c32g200/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-2c4g32/instance-schema.json diff --git a/addons/mysql-cluster/8.0/plans/standard-16c64g400/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-2c4g64/instance-schema.json similarity index 100% rename from addons/mysql-cluster/8.0/plans/standard-16c64g400/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-2c4g64/instance-schema.json diff --git a/addons/mysql-cluster/8.0/plans/standard-2c4g20/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-4c16g256/instance-schema.json similarity index 100% rename from addons/mysql-cluster/8.0/plans/standard-2c4g20/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-4c16g256/instance-schema.json diff --git a/addons/mysql-cluster/8.0/plans/standard-2c8g50/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-4c16g512/instance-schema.json similarity index 100% rename from addons/mysql-cluster/8.0/plans/standard-2c8g50/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-4c16g512/instance-schema.json diff --git a/addons/mysql-cluster/8.0/plans/standard-32c128g800/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-4c8g128/instance-schema.json similarity index 100% rename from addons/mysql-cluster/8.0/plans/standard-32c128g800/create-instance-schema.json rename to 
addons/opensearch/3.0/plans/standard-4c8g128/instance-schema.json diff --git a/addons/mysql-cluster/8.0/plans/standard-4c16g100/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-8c32g1024/instance-schema.json similarity index 100% rename from addons/mysql-cluster/8.0/plans/standard-4c16g100/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-8c32g1024/instance-schema.json diff --git a/addons/mysql-cluster/8.0/plans/standard-8c32g200/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-8c32g2048/instance-schema.json similarity index 100% rename from addons/mysql-cluster/8.0/plans/standard-8c32g200/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-8c32g2048/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-2c4g32/create-instance-schema.json b/addons/opensearch/3.0/plans/standard-8c32g768/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-2c4g32/create-instance-schema.json rename to addons/opensearch/3.0/plans/standard-8c32g768/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-2c4g64/create-instance-schema.json b/addons/pmm/2.41/plans/standard-2c2g10/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-2c4g64/create-instance-schema.json rename to addons/pmm/2.41/plans/standard-2c2g10/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-4c16g256/create-instance-schema.json b/addons/pmm/2.41/plans/standard-4c8g100/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-4c16g256/create-instance-schema.json rename to addons/pmm/2.41/plans/standard-4c8g100/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-4c16g512/create-instance-schema.json b/addons/pmm/2.41/plans/standard-8c16g200/instance-schema.json similarity index 100% rename from 
addons/opensearch/2.10/plans/standard-4c16g512/create-instance-schema.json rename to addons/pmm/2.41/plans/standard-8c16g200/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-4c8g128/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-16c64g400/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-4c8g128/create-instance-schema.json rename to addons/postgresql-cluster/15/plans/standard-16c64g400/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-8c32g1024/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-2c4g20/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-8c32g1024/create-instance-schema.json rename to addons/postgresql-cluster/15/plans/standard-2c4g20/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-8c32g2048/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-2c8g50/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-8c32g2048/create-instance-schema.json rename to addons/postgresql-cluster/15/plans/standard-2c8g50/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-8c32g768/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-32c128g800/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-8c32g768/create-instance-schema.json rename to addons/postgresql-cluster/15/plans/standard-32c128g800/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-2c4g32/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-32c64g4000/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-2c4g32/create-instance-schema.json rename to addons/postgresql-cluster/15/plans/standard-32c64g4000/instance-schema.json diff --git 
a/addons/opensearch/3.0/plans/standard-2c4g64/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-4c16g100/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-2c4g64/create-instance-schema.json rename to addons/postgresql-cluster/15/plans/standard-4c16g100/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-4c16g256/create-instance-schema.json b/addons/postgresql-cluster/15/plans/standard-8c32g200/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-4c16g256/create-instance-schema.json rename to addons/postgresql-cluster/15/plans/standard-8c32g200/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-4c16g512/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-16c64g400/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-4c16g512/create-instance-schema.json rename to addons/postgresql-cluster/16/plans/standard-16c64g400/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-4c8g128/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-2c4g20/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-4c8g128/create-instance-schema.json rename to addons/postgresql-cluster/16/plans/standard-2c4g20/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-8c32g1024/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-2c8g50/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-8c32g1024/create-instance-schema.json rename to addons/postgresql-cluster/16/plans/standard-2c8g50/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-8c32g2048/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-32c128g800/instance-schema.json similarity index 100% rename from 
addons/opensearch/3.0/plans/standard-8c32g2048/create-instance-schema.json rename to addons/postgresql-cluster/16/plans/standard-32c128g800/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-8c32g768/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-32c64g4000/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-8c32g768/create-instance-schema.json rename to addons/postgresql-cluster/16/plans/standard-32c64g4000/instance-schema.json diff --git a/addons/pmm/2.41/plans/standard-2c2g10/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-4c16g100/instance-schema.json similarity index 100% rename from addons/pmm/2.41/plans/standard-2c2g10/create-instance-schema.json rename to addons/postgresql-cluster/16/plans/standard-4c16g100/instance-schema.json diff --git a/addons/pmm/2.41/plans/standard-4c8g100/create-instance-schema.json b/addons/postgresql-cluster/16/plans/standard-8c32g200/instance-schema.json similarity index 100% rename from addons/pmm/2.41/plans/standard-4c8g100/create-instance-schema.json rename to addons/postgresql-cluster/16/plans/standard-8c32g200/instance-schema.json diff --git a/addons/pmm/2.41/plans/standard-8c16g200/create-instance-schema.json b/addons/prometheus/2/plans/standard-16c32g500/instance-schema.json similarity index 100% rename from addons/pmm/2.41/plans/standard-8c16g200/create-instance-schema.json rename to addons/prometheus/2/plans/standard-16c32g500/instance-schema.json diff --git a/addons/postgresql-cluster/15/plans/standard-16c64g400/create-instance-schema.json b/addons/prometheus/2/plans/standard-1c1g10/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/15/plans/standard-16c64g400/create-instance-schema.json rename to addons/prometheus/2/plans/standard-1c1g10/instance-schema.json diff --git a/addons/postgresql-cluster/15/plans/standard-2c4g20/create-instance-schema.json 
b/addons/prometheus/2/plans/standard-2c4g50/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/15/plans/standard-2c4g20/create-instance-schema.json rename to addons/prometheus/2/plans/standard-2c4g50/instance-schema.json diff --git a/addons/postgresql-cluster/15/plans/standard-2c8g50/create-instance-schema.json b/addons/prometheus/2/plans/standard-4c8g100/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/15/plans/standard-2c8g50/create-instance-schema.json rename to addons/prometheus/2/plans/standard-4c8g100/instance-schema.json diff --git a/addons/postgresql-cluster/15/plans/standard-32c128g800/create-instance-schema.json b/addons/prometheus/2/plans/standard-8c16g200/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/15/plans/standard-32c128g800/create-instance-schema.json rename to addons/prometheus/2/plans/standard-8c16g200/instance-schema.json diff --git a/addons/postgresql-cluster/15/plans/standard-32c64g4000/create-instance-schema.json b/addons/rabbitmq/3.12/plans/standard-16c32g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/15/plans/standard-32c64g4000/create-instance-schema.json rename to addons/rabbitmq/3.12/plans/standard-16c32g3w/instance-schema.json diff --git a/addons/postgresql-cluster/15/plans/standard-4c16g100/create-instance-schema.json b/addons/rabbitmq/3.12/plans/standard-2c4g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/15/plans/standard-4c16g100/create-instance-schema.json rename to addons/rabbitmq/3.12/plans/standard-2c4g3w/instance-schema.json diff --git a/addons/postgresql-cluster/15/plans/standard-8c32g200/create-instance-schema.json b/addons/rabbitmq/3.12/plans/standard-4c8g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/15/plans/standard-8c32g200/create-instance-schema.json rename to 
addons/rabbitmq/3.12/plans/standard-4c8g3w/instance-schema.json diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/create-instance-schema.json b/addons/rabbitmq/3.12/plans/standard-8c16g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/16/plans/standard-16c64g400/create-instance-schema.json rename to addons/rabbitmq/3.12/plans/standard-8c16g3w/instance-schema.json diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-16c32g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/16/plans/standard-2c4g20/create-instance-schema.json rename to addons/rabbitmq/4.0/plans/standard-16c32g3w/instance-schema.json diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-2c4g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/16/plans/standard-2c8g50/create-instance-schema.json rename to addons/rabbitmq/4.0/plans/standard-2c4g3w/instance-schema.json diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-4c8g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/16/plans/standard-32c128g800/create-instance-schema.json rename to addons/rabbitmq/4.0/plans/standard-4c8g3w/instance-schema.json diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/create-instance-schema.json b/addons/rabbitmq/4.0/plans/standard-8c16g3w/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/16/plans/standard-32c64g4000/create-instance-schema.json rename to addons/rabbitmq/4.0/plans/standard-8c16g3w/instance-schema.json diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-1024/instance-schema.json similarity index 100% 
rename from addons/postgresql-cluster/16/plans/standard-4c16g100/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-1024/instance-schema.json diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-128/instance-schema.json similarity index 100% rename from addons/postgresql-cluster/16/plans/standard-8c32g200/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-128/instance-schema.json diff --git a/addons/prometheus/2/plans/standard-16c32g500/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-16384/instance-schema.json similarity index 100% rename from addons/prometheus/2/plans/standard-16c32g500/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-16384/instance-schema.json diff --git a/addons/prometheus/2/plans/standard-1c1g10/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-2048/instance-schema.json similarity index 100% rename from addons/prometheus/2/plans/standard-1c1g10/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-2048/instance-schema.json diff --git a/addons/prometheus/2/plans/standard-2c4g50/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-256/instance-schema.json similarity index 100% rename from addons/prometheus/2/plans/standard-2c4g50/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-256/instance-schema.json diff --git a/addons/prometheus/2/plans/standard-4c8g100/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-32768/instance-schema.json similarity index 100% rename from addons/prometheus/2/plans/standard-4c8g100/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-32768/instance-schema.json diff --git a/addons/prometheus/2/plans/standard-8c16g200/create-instance-schema.json 
b/addons/redis-cluster/7.0/plans/standard-4096/instance-schema.json similarity index 100% rename from addons/prometheus/2/plans/standard-8c16g200/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-4096/instance-schema.json diff --git a/addons/rabbitmq/3.12/plans/standard-16c32g3w/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-512/instance-schema.json similarity index 100% rename from addons/rabbitmq/3.12/plans/standard-16c32g3w/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-512/instance-schema.json diff --git a/addons/rabbitmq/3.12/plans/standard-2c4g3w/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-65536/instance-schema.json similarity index 100% rename from addons/rabbitmq/3.12/plans/standard-2c4g3w/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-65536/instance-schema.json diff --git a/addons/rabbitmq/3.12/plans/standard-4c8g3w/create-instance-schema.json b/addons/redis-cluster/7.0/plans/standard-8192/instance-schema.json similarity index 100% rename from addons/rabbitmq/3.12/plans/standard-4c8g3w/create-instance-schema.json rename to addons/redis-cluster/7.0/plans/standard-8192/instance-schema.json diff --git a/addons/rabbitmq/3.12/plans/standard-8c16g3w/create-instance-schema.json b/addons/redis/7.0/plans/standard-1024/instance-schema.json similarity index 100% rename from addons/rabbitmq/3.12/plans/standard-8c16g3w/create-instance-schema.json rename to addons/redis/7.0/plans/standard-1024/instance-schema.json diff --git a/addons/rabbitmq/4.0/plans/standard-16c32g3w/create-instance-schema.json b/addons/redis/7.0/plans/standard-128/instance-schema.json similarity index 100% rename from addons/rabbitmq/4.0/plans/standard-16c32g3w/create-instance-schema.json rename to addons/redis/7.0/plans/standard-128/instance-schema.json diff --git a/addons/rabbitmq/4.0/plans/standard-2c4g3w/create-instance-schema.json 
b/addons/redis/7.0/plans/standard-131072/instance-schema.json similarity index 100% rename from addons/rabbitmq/4.0/plans/standard-2c4g3w/create-instance-schema.json rename to addons/redis/7.0/plans/standard-131072/instance-schema.json diff --git a/addons/rabbitmq/4.0/plans/standard-4c8g3w/create-instance-schema.json b/addons/redis/7.0/plans/standard-16384/instance-schema.json similarity index 100% rename from addons/rabbitmq/4.0/plans/standard-4c8g3w/create-instance-schema.json rename to addons/redis/7.0/plans/standard-16384/instance-schema.json diff --git a/addons/rabbitmq/4.0/plans/standard-8c16g3w/create-instance-schema.json b/addons/redis/7.0/plans/standard-2048/instance-schema.json similarity index 100% rename from addons/rabbitmq/4.0/plans/standard-8c16g3w/create-instance-schema.json rename to addons/redis/7.0/plans/standard-2048/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-1024/create-instance-schema.json b/addons/redis/7.0/plans/standard-256/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-1024/create-instance-schema.json rename to addons/redis/7.0/plans/standard-256/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-128/create-instance-schema.json b/addons/redis/7.0/plans/standard-32768/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-128/create-instance-schema.json rename to addons/redis/7.0/plans/standard-32768/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-16384/create-instance-schema.json b/addons/redis/7.0/plans/standard-4096/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-16384/create-instance-schema.json rename to addons/redis/7.0/plans/standard-4096/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-2048/create-instance-schema.json b/addons/redis/7.0/plans/standard-512/instance-schema.json similarity index 100% 
rename from addons/redis-cluster/7.0/plans/standard-2048/create-instance-schema.json rename to addons/redis/7.0/plans/standard-512/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-256/create-instance-schema.json b/addons/redis/7.0/plans/standard-65536/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-256/create-instance-schema.json rename to addons/redis/7.0/plans/standard-65536/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-32768/create-instance-schema.json b/addons/redis/7.0/plans/standard-8192/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-32768/create-instance-schema.json rename to addons/redis/7.0/plans/standard-8192/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-4096/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s1024/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-4096/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s1024/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-512/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s128/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-512/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s128/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-65536/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s2048/instance-schema.json similarity index 100% rename from addons/redis-cluster/7.0/plans/standard-65536/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s2048/instance-schema.json diff --git a/addons/redis-cluster/7.0/plans/standard-8192/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s256/instance-schema.json similarity index 100% rename from 
addons/redis-cluster/7.0/plans/standard-8192/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s256/instance-schema.json diff --git a/addons/redis/7.0/plans/standard-1024/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s3072/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-1024/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s3072/instance-schema.json diff --git a/addons/redis/7.0/plans/standard-128/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s32/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-128/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s32/instance-schema.json diff --git a/addons/seaweedfs/3/plans/standard-v4s512/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s512/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/seaweedfs/3/plans/standard-v4s512/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-131072/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s512/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-131072/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s512/instance-schema.json diff --git a/addons/seaweedfs/3/plans/standard-v4s64/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s64/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/seaweedfs/3/plans/standard-v4s64/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - 
"$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-16384/create-instance-schema.json b/addons/seaweedfs/3/plans/standard-v4s64/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-16384/create-instance-schema.json rename to addons/seaweedfs/3/plans/standard-v4s64/instance-schema.json diff --git a/addons/spark/3.4/plans/standard-16c32g5w/create-instance-schema.json b/addons/spark/3.4/plans/standard-16c32g5w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/spark/3.4/plans/standard-16c32g5w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-16c32g5w/instance-schema.json b/addons/spark/3.4/plans/standard-16c32g5w/instance-schema.json new file mode 100644 index 00000000..c670c165 --- /dev/null +++ b/addons/spark/3.4/plans/standard-16c32g5w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "master": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1[0-4]\\d{2}|15(?:0\\d|1\\d|2\\d|3[0-6]))Gi|1(?:.[0-5])?Ti)$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + 
"description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1[0-4]\\d{2}|15(?:0\\d|1\\d|2\\d|3[0-6]))Gi|1(?:.[0-5])?Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json b/addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/spark/3.4/plans/standard-16c64g5w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-16c64g5w/instance-schema.json b/addons/spark/3.4/plans/standard-16c64g5w/instance-schema.json new file mode 100644 index 00000000..992af817 --- /dev/null +++ b/addons/spark/3.4/plans/standard-16c64g5w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "master": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|20(?:[0-3]\\d|4[0-8]))Gi|(?:1(?:.\\d)?|2(?:.0)?)Ti)$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|20(?:[0-3]\\d|4[0-8]))Gi|(?:1(?:.\\d)?|2(?:.0)?)Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-1c2g2w/create-instance-schema.json b/addons/spark/3.4/plans/standard-1c2g2w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- 
a/addons/spark/3.4/plans/standard-1c2g2w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-1c2g2w/instance-schema.json b/addons/spark/3.4/plans/standard-1c2g2w/instance-schema.json new file mode 100644 index 00000000..0d13859b --- /dev/null +++ b/addons/spark/3.4/plans/standard-1c2g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "master": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|100)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|100)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-24c48g5w/create-instance-schema.json b/addons/spark/3.4/plans/standard-24c48g5w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/spark/3.4/plans/standard-24c48g5w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-24c48g5w/instance-schema.json b/addons/spark/3.4/plans/standard-24c48g5w/instance-schema.json new 
file mode 100644 index 00000000..63b2e050 --- /dev/null +++ b/addons/spark/3.4/plans/standard-24c48g5w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "master": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|2(?:[0-4]\\d{2}|5(?:[0-5]\\d|60)))Gi|(?:1(?:.\\d)?|2(?:.[0-5])?)Ti)$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:(?:[1-9]\\d{0,2}|1\\d{3}|2(?:[0-4]\\d{2}|5(?:[0-5]\\d|60)))Gi|(?:1(?:.\\d)?|2(?:.[0-5])?)Ti)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-2c4g2w/create-instance-schema.json b/addons/spark/3.4/plans/standard-2c4g2w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/spark/3.4/plans/standard-2c4g2w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-2c4g2w/instance-schema.json b/addons/spark/3.4/plans/standard-2c4g2w/instance-schema.json new file mode 100644 index 00000000..0178d905 --- /dev/null +++ b/addons/spark/3.4/plans/standard-2c4g2w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "master": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": 
"persistence size", + "pattern": "^(?:[1-9]\\d?|1\\d\\d|200)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|1\\d\\d|200)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-4c8g3w/create-instance-schema.json b/addons/spark/3.4/plans/standard-4c8g3w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/spark/3.4/plans/standard-4c8g3w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-4c8g3w/instance-schema.json b/addons/spark/3.4/plans/standard-4c8g3w/instance-schema.json new file mode 100644 index 00000000..f2b29959 --- /dev/null +++ b/addons/spark/3.4/plans/standard-4c8g3w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "master": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|[1-4]\\d\\d|500)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d?|[1-4]\\d\\d|500)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-8c16g3w/create-instance-schema.json b/addons/spark/3.4/plans/standard-8c16g3w/create-instance-schema.json 
deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/spark/3.4/plans/standard-8c16g3w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/spark/3.4/plans/standard-8c16g3w/instance-schema.json b/addons/spark/3.4/plans/standard-8c16g3w/instance-schema.json new file mode 100644 index 00000000..9abbe349 --- /dev/null +++ b/addons/spark/3.4/plans/standard-8c16g3w/instance-schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "master": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d{0,2}|1000)Gi$" + } + } + } + } + }, + "worker": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:[1-9]\\d{0,2}|1000)Gi$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/victoriametrics/1/plans/standard-16c32g500/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git 
a/addons/redis/7.0/plans/standard-2048/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-16c32g500/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-2048/create-instance-schema.json rename to addons/victoriametrics/1/plans/standard-16c32g500/instance-schema.json diff --git a/addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/victoriametrics/1/plans/standard-1c1g10/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-256/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-1c1g10/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-256/create-instance-schema.json rename to addons/victoriametrics/1/plans/standard-1c1g10/instance-schema.json diff --git a/addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/victoriametrics/1/plans/standard-2c4g50/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-32768/create-instance-schema.json 
b/addons/victoriametrics/1/plans/standard-2c4g50/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-32768/create-instance-schema.json rename to addons/victoriametrics/1/plans/standard-2c4g50/instance-schema.json diff --git a/addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/victoriametrics/1/plans/standard-4c8g100/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-4096/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-4c8g100/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-4096/create-instance-schema.json rename to addons/victoriametrics/1/plans/standard-4c8g100/instance-schema.json diff --git a/addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/victoriametrics/1/plans/standard-8c16g200/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-512/create-instance-schema.json b/addons/victoriametrics/1/plans/standard-8c16g200/instance-schema.json similarity index 100% rename from 
addons/redis/7.0/plans/standard-512/create-instance-schema.json rename to addons/victoriametrics/1/plans/standard-8c16g200/instance-schema.json diff --git a/addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json b/addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/yugabytedb/2024/plans/standard-1c2g3w10/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-65536/create-instance-schema.json b/addons/yugabytedb/2024/plans/standard-1c2g3w10/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-65536/create-instance-schema.json rename to addons/yugabytedb/2024/plans/standard-1c2g3w10/instance-schema.json diff --git a/addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json b/addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/yugabytedb/2024/plans/standard-2c4g3w20/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/redis/7.0/plans/standard-8192/create-instance-schema.json b/addons/yugabytedb/2024/plans/standard-2c4g3w20/instance-schema.json similarity index 100% rename from addons/redis/7.0/plans/standard-8192/create-instance-schema.json rename to 
addons/yugabytedb/2024/plans/standard-2c4g3w20/instance-schema.json diff --git a/addons/zookeeper/3.9/plans/standard-16c32g3w/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-16c32g3w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/zookeeper/3.9/plans/standard-16c32g3w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/seaweedfs/3/plans/standard-v4s1024/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-16c32g3w/instance-schema.json similarity index 100% rename from addons/seaweedfs/3/plans/standard-v4s1024/create-instance-schema.json rename to addons/zookeeper/3.9/plans/standard-16c32g3w/instance-schema.json diff --git a/addons/zookeeper/3.9/plans/standard-1c2g3w/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-1c2g3w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/zookeeper/3.9/plans/standard-1c2g3w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/seaweedfs/3/plans/standard-v4s128/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-1c2g3w/instance-schema.json similarity index 100% rename from addons/seaweedfs/3/plans/standard-v4s128/create-instance-schema.json rename to addons/zookeeper/3.9/plans/standard-1c2g3w/instance-schema.json diff --git 
a/addons/zookeeper/3.9/plans/standard-2c4g3w/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-2c4g3w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/zookeeper/3.9/plans/standard-2c4g3w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/seaweedfs/3/plans/standard-v4s2048/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-2c4g3w/instance-schema.json similarity index 100% rename from addons/seaweedfs/3/plans/standard-v4s2048/create-instance-schema.json rename to addons/zookeeper/3.9/plans/standard-2c4g3w/instance-schema.json diff --git a/addons/zookeeper/3.9/plans/standard-2c4g5w/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-2c4g5w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/zookeeper/3.9/plans/standard-2c4g5w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/seaweedfs/3/plans/standard-v4s256/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-2c4g5w/instance-schema.json similarity index 100% rename from addons/seaweedfs/3/plans/standard-v4s256/create-instance-schema.json rename to addons/zookeeper/3.9/plans/standard-2c4g5w/instance-schema.json diff --git a/addons/zookeeper/3.9/plans/standard-4c8g3w/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-4c8g3w/create-instance-schema.json 
deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/zookeeper/3.9/plans/standard-4c8g3w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/seaweedfs/3/plans/standard-v4s3072/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-4c8g3w/instance-schema.json similarity index 100% rename from addons/seaweedfs/3/plans/standard-v4s3072/create-instance-schema.json rename to addons/zookeeper/3.9/plans/standard-4c8g3w/instance-schema.json diff --git a/addons/zookeeper/3.9/plans/standard-8c16g3w/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-8c16g3w/create-instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/zookeeper/3.9/plans/standard-8c16g3w/create-instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/seaweedfs/3/plans/standard-v4s32/create-instance-schema.json b/addons/zookeeper/3.9/plans/standard-8c16g3w/instance-schema.json similarity index 100% rename from addons/seaweedfs/3/plans/standard-v4s32/create-instance-schema.json rename to addons/zookeeper/3.9/plans/standard-8c16g3w/instance-schema.json From 3eb17946a2b54bc20d4e6933dcc49af5665ac645 Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 2 Sep 2025 13:58:04 +0800 Subject: [PATCH 69/93] chore(fluentbit): add custom input path (#116) --- .gitignore | 1 + .../2/chart/fluentbit/templates/_helpers.tpl | 19 +++++++++++++++++++ 
.../fluentbit/2/chart/fluentbit/values.yaml | 7 ++++--- addons/fluentbit/2/meta.yaml | 3 +++ .../10/chart/grafana/templates/configmap.yaml | 1 + 5 files changed, 28 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index b097d5f2..1b3100bd 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,4 @@ Chart.lock *.fix addons/grafana/10/dashborad/ addons/prometheus/prom-value.yaml +toCopy/ diff --git a/addons/fluentbit/2/chart/fluentbit/templates/_helpers.tpl b/addons/fluentbit/2/chart/fluentbit/templates/_helpers.tpl index 01f7bd0e..a56088d0 100644 --- a/addons/fluentbit/2/chart/fluentbit/templates/_helpers.tpl +++ b/addons/fluentbit/2/chart/fluentbit/templates/_helpers.tpl @@ -60,3 +60,22 @@ Return the Fluentbit Reloader image name {{- include "fluentbit.image" . -}} {{- end -}} {{- end -}} + +{{/* +Return the Fluentbit input path +*/}} +{{- define "input.paths" -}} + {{- $namespace := .Release.Namespace -}} + {{- $wildcards := .Values.daemonset.config.podWildcards -}} + {{- $pathTemplate := "/var/log/containers/%s_%s_*.log" -}} + + {{/* */}} + {{- $paths := list -}} + {{- range $wildcard := $wildcards -}} + {{- $path := printf $pathTemplate $wildcard $namespace -}} + {{- $paths = append $paths $path -}} + {{- end -}} + {{- join "," $paths -}} +{{- end -}} + + diff --git a/addons/fluentbit/2/chart/fluentbit/values.yaml b/addons/fluentbit/2/chart/fluentbit/values.yaml index 52f9d738..b34a989e 100644 --- a/addons/fluentbit/2/chart/fluentbit/values.yaml +++ b/addons/fluentbit/2/chart/fluentbit/values.yaml @@ -164,8 +164,9 @@ daemonset: type: OnDelete flush: 1 ## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file - config: - + config: + podWildcards: + - "*" service: | [SERVICE] Flush {{ .Values.daemonset.flush }} @@ -181,7 +182,7 @@ daemonset: inputs: | [INPUT] Name tail - Path /var/log/containers/*_{{ .Release.Namespace }}_*.log + Path {{ include "input.paths" . 
}} DB /data/containers.pos.db DB.locking true Offset_Key offset diff --git a/addons/fluentbit/2/meta.yaml b/addons/fluentbit/2/meta.yaml index f4684883..e6d64b97 100644 --- a/addons/fluentbit/2/meta.yaml +++ b/addons/fluentbit/2/meta.yaml @@ -18,6 +18,9 @@ allow_parameters: - name: "daemonset.extraEnvVars" required: false description: "extra environment variables to add to fluentbit" +- name: "daemonset.config.podWildcards" + equired: false + description: "fluentbit inout paths pod wildcards" - name: "daemonset.config.outputs" required: true description: "destinations for your data: databases, cloud services and more" diff --git a/addons/grafana/10/chart/grafana/templates/configmap.yaml b/addons/grafana/10/chart/grafana/templates/configmap.yaml index 1d5393ce..a6abcea2 100644 --- a/addons/grafana/10/chart/grafana/templates/configmap.yaml +++ b/addons/grafana/10/chart/grafana/templates/configmap.yaml @@ -32,3 +32,4 @@ data: GF_PATHS_CONFIG: "/opt/drycc/grafana/conf/grafana.ini" GF_PATHS_DATA: "/opt/drycc/grafana/data" GF_PATHS_LOGS: "/opt/drycc/grafana/logs" + From 08eca72d3b154f88da79f46b03f6be18d31174ac Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 2 Sep 2025 13:59:16 +0800 Subject: [PATCH 70/93] chore(addons): remove apolloconfig --- addons/apollo-bk/2.3/chart/apollo/.helmignore | 21 -- addons/apollo-bk/2.3/chart/apollo/1.yaml | 260 ----------------- addons/apollo-bk/2.3/chart/apollo/Chart.yaml | 25 -- addons/apollo-bk/2.3/chart/apollo/README.md | 0 .../2.3/chart/apollo/templates/NOTES.txt | 0 .../2.3/chart/apollo/templates/_helpers.tpl | 133 --------- .../apollo/templates/adminservice/NOTES.txt | 32 -- .../templates/adminservice/deployment.yaml | 88 ------ .../templates/adminservice/ingress.yaml | 63 ---- .../apollo/templates/adminservice/secret.yaml | 20 -- .../templates/adminservice/service.yaml | 22 -- .../apollo/templates/configservice/NOTES.txt | 32 -- .../templates/configservice/deployment.yaml | 88 ------ .../templates/configservice/ingress.yaml | 
63 ---- .../templates/configservice/secret.yaml | 22 -- .../templates/configservice/service.yaml | 22 -- .../chart/apollo/templates/portal/NOTES.txt | 25 -- .../apollo/templates/portal/deployment.yaml | 102 ------- .../apollo/templates/portal/ingress.yaml | 64 ---- .../chart/apollo/templates/portal/secret.yaml | 36 --- .../apollo/templates/portal/service.yaml | 23 -- addons/apollo-bk/2.3/chart/apollo/values.yaml | 273 ------------------ addons/apollo-bk/2.3/meta.yaml | 27 -- .../2.3/plans/standard-1c2g2w/bind.yaml | 43 --- .../standard-1c2g2w/instance-schema.json | 12 - .../2.3/plans/standard-1c2g2w/meta.yaml | 6 - .../2.3/plans/standard-1c2g2w/values.yaml | 60 ---- 27 files changed, 1562 deletions(-) delete mode 100644 addons/apollo-bk/2.3/chart/apollo/.helmignore delete mode 100644 addons/apollo-bk/2.3/chart/apollo/1.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/Chart.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/README.md delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/NOTES.txt delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml delete mode 100644 
addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml delete mode 100644 addons/apollo-bk/2.3/chart/apollo/values.yaml delete mode 100644 addons/apollo-bk/2.3/meta.yaml delete mode 100644 addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml delete mode 100644 addons/apollo-bk/2.3/plans/standard-1c2g2w/instance-schema.json delete mode 100644 addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml delete mode 100644 addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml diff --git a/addons/apollo-bk/2.3/chart/apollo/.helmignore b/addons/apollo-bk/2.3/chart/apollo/.helmignore deleted file mode 100644 index f0c13194..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/addons/apollo-bk/2.3/chart/apollo/1.yaml b/addons/apollo-bk/2.3/chart/apollo/1.yaml deleted file mode 100644 index c6edc2b8..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/1.yaml +++ /dev/null @@ -1,260 +0,0 @@ ---- -# Source: apollo/templates/adminservice/secret.yaml -kind: Secret -apiVersion: v1 -metadata: - name: release-name-apollo-adminservice -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 - spring.datasource.username = 1 - spring.datasource.password = 1 ---- -# Source: apollo/templates/portal/secret.yaml -kind: Secret -apiVersion: v1 -metadata: - name: release-name-apollo-portal -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://:3306/ApolloPortalDB?characterEncoding=utf8 - spring.datasource.username = 1 - spring.datasource.password = 1 - apollo.portal.envs = dev - apollo-env.properties: | - dev = release-name-apollo-configservice:8080 ---- -# Source: apollo/templates/configservice/secret.yaml -kind: ConfigMap -apiVersion: v1 -metadata: - name: release-name-apollo-configservice -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://:3306/ApolloConfigDB?characterEncoding=utf8 - spring.datasource.username = 1 - spring.datasource.password = 1 - apollo.config-service.url = http://release-name-apollo-configservice.default:8080 - apollo.admin-service.url = http://release-name-apollo-adminservice.default:8090 ---- -# Source: apollo/templates/adminservice/service.yaml -kind: Service -apiVersion: v1 -metadata: - name: release-name-apollo-adminservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - type: ClusterIP - ports: - - name: http - protocol: TCP - port: 8090 - targetPort: 8090 - selector: - app: 
release-name-apollo-adminservice ---- -# Source: apollo/templates/configservice/service.yaml -kind: Service -apiVersion: v1 -metadata: - name: release-name-apollo-configservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - type: ClusterIP - ports: - - name: http - protocol: TCP - port: 8080 - targetPort: 8080 - selector: - app: release-name-apollo-configservice ---- -# Source: apollo/templates/portal/service.yaml -kind: Service -apiVersion: v1 -metadata: - name: release-name-apollo-portal - labels: - app.kubernetes.io/version: "2.3.0" -spec: - type: ClusterIP - ports: - - name: http - protocol: TCP - port: 8070 - targetPort: 8070 - selector: - app: release-name-apollo-portal - sessionAffinity: ClientIP ---- -# Source: apollo/templates/adminservice/deployment.yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: release-name-apollo-adminservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - replicas: 2 - selector: - matchLabels: - app: release-name-apollo-adminservice - template: - metadata: - labels: - app: release-name-apollo-adminservice - spec: - volumes: - - name: volume-configmap-release-name-apollo-adminservice - configMap: - name: release-name-apollo-adminservice - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: apollo-adminservice - image: "drycc-addons/apollo-adminservice:2.3" - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 8090 - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: "github,kubernetes" - volumeMounts: - - name: volume-configmap-release-name-apollo-adminservice - mountPath: /apollo-adminservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: 8090 - initialDelaySeconds: 100 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /health - port: 8090 - initialDelaySeconds: 30 - periodSeconds: 5 - resources: - {} ---- -# Source: 
apollo/templates/configservice/deployment.yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: release-name-apollo-configservice - labels: - app.kubernetes.io/version: "2.3.0" -spec: - replicas: 2 - selector: - matchLabels: - app: release-name-apollo-configservice - template: - metadata: - labels: - app: release-name-apollo-configservice - spec: - volumes: - - name: volume-configmap-release-name-apollo-configservice - configMap: - name: release-name-apollo-configservice - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: apollo-configservice - image: "drycc-addons/apollo-configservice:2.3" - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 8080 - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: "github,kubernetes" - volumeMounts: - - name: volume-configmap-release-name-apollo-configservice - mountPath: /apollo-configservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 100 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /health - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 5 - resources: - {} ---- -# Source: apollo/templates/portal/deployment.yaml -kind: Deployment -apiVersion: apps/v1 -metadata: - name: release-name-apollo-portal - labels: - app.kubernetes.io/version: "2.3.0" -spec: - replicas: 1 - selector: - matchLabels: - app: release-name-apollo-portal - template: - metadata: - labels: - app: release-name-apollo-portal - spec: - volumes: - - name: secret-release-name-apollo-portal - Secret: - name: release-name-apollo-portal - items: - - key: application-github.properties - path: application-github.properties - - key: apollo-env.properties - path: apollo-env.properties - defaultMode: 420 - containers: - - name: apollo-portal - image: "drycc-addons/apollo-portal:2.3" - imagePullPolicy: IfNotPresent - ports: - - name: http - 
containerPort: 8070 - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: "github,auth" - volumeMounts: - - name: secret-release-name-apollo-portal - mountPath: /apollo-portal/config/application-github.properties - subPath: application-github.properties - - name: secret-release-name-apollo-portal - mountPath: /apollo-portal/config/apollo-env.properties - subPath: apollo-env.properties - livenessProbe: - tcpSocket: - port: 8070 - initialDelaySeconds: 100 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /health - port: 8070 - initialDelaySeconds: 30 - periodSeconds: 5 - resources: - {} ---- -# Source: apollo/templates/portal/ingress.yaml -# diff --git a/addons/apollo-bk/2.3/chart/apollo/Chart.yaml b/addons/apollo-bk/2.3/chart/apollo/Chart.yaml deleted file mode 100644 index 07bdb346..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/Chart.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright Drycc Community. -# SPDX-License-Identifier: APACHE-2.0 - -annotations: - category: config - licenses: Apache-2.0 -apiVersion: v2 -appVersion: "2.3.0" -dependencies: -- name: common - repository: oci://registry.drycc.cc/charts - version: ~1.1.3 -description: A Helm chart for Apollo Config Service and Apollo Admin Service -home: https://github.com/apolloconfig/apollo -icon: https://raw.githubusercontent.com/apolloconfig/apollo/master/apollo-portal/src/main/resources/static/img/logo-simple.png -keywords: -- apollo -- apolloconfig -maintainers: -- name: Drycc Community. 
- url: https://github.com/drycc-addons/addons -name: apollo -sources: -- https://github.com/drycc-addons/addons -version: 0.1.0 diff --git a/addons/apollo-bk/2.3/chart/apollo/README.md b/addons/apollo-bk/2.3/chart/apollo/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/NOTES.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl b/addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl deleted file mode 100644 index 6baef133..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/_helpers.tpl +++ /dev/null @@ -1,133 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - - -{{/* -Full name for portal service -*/}} -{{- define "apollo.portal.fullName" -}} -{{- if .Values.portal.fullNameOverride -}} -{{- .Values.portal.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- if contains .Values.portal.name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name .Values.portal.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - - -{{/* -Common labels -*/}} -{{- define "apollo.labels" -}} -{{- if .Chart.AppVersion -}} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -{{- end -}} - -{{/* -Service name for portal -*/}} -{{- define "apollo.portal.serviceName" -}} -{{- if .Values.portal.service.fullNameOverride -}} -{{- .Values.portal.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{ include "apollo.portal.fullName" .}} -{{- end -}} -{{- end -}} - - -{{/* vim: set filetype=mustache: */}} - -{{/* -Service name for configdb -*/}} -{{- define "apollo.configdb.serviceName" -}} -{{- .Values.apolloService.configdb.host -}} -{{- end -}} - -{{/* -Service port for configdb -*/}} -{{- define "apollo.configdb.servicePort" -}} 
-{{- if .Values.apolloService.configdb.service.enabled -}} -{{- .Values.apolloService.configdb.service.port -}} -{{- else -}} -{{- .Values.apolloService.configdb.port -}} -{{- end -}} -{{- end -}} - -{{/* -Full name for config service -*/}} -{{- define "apollo.configService.fullName" -}} -{{- if .Values.apolloService.configService.fullNameOverride -}} -{{- .Values.apolloService.configService.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- if contains .Values.apolloService.configService.name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name .Values.apolloService.configService.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Service name for config service -*/}} -{{- define "apollo.configService.serviceName" -}} -{{- if .Values.apolloService.configService.service.fullNameOverride -}} -{{- .Values.apolloService.configService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{ include "apollo.configService.fullName" .}} -{{- end -}} -{{- end -}} - -{{/* -Config service url to be accessed by apollo-client -*/}} -{{- define "apollo.configService.serviceUrl" -}} -{{- if .Values.apolloService.configService.config.configServiceUrlOverride -}} -{{ .Values.apolloService.configService.config.configServiceUrlOverride }} -{{- else -}} -http://{{ include "apollo.configService.serviceName" .}}.{{ .Release.Namespace }}:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} -{{- end -}} -{{- end -}} - -{{/* -Full name for admin service -*/}} -{{- define "apollo.adminService.fullName" -}} -{{- if .Values.apolloService.adminService.fullNameOverride -}} -{{- .Values.apolloService.adminService.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- if contains .Values.apolloService.adminService.name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- 
else -}} -{{- printf "%s-%s" .Release.Name .Values.apolloService.adminService.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Service name for admin service -*/}} -{{- define "apollo.adminService.serviceName" -}} -{{- if .Values.apolloService.adminService.service.fullNameOverride -}} -{{- .Values.apolloService.adminService.service.fullNameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{ include "apollo.adminService.fullName" .}} -{{- end -}} -{{- end -}} - -{{/* -Admin service url to be accessed by apollo-portal -*/}} -{{- define "apollo.adminService.serviceUrl" -}} -{{- if .Values.apolloService.configService.config.adminServiceUrlOverride -}} -{{ .Values.apolloService.configService.config.adminServiceUrlOverride -}} -{{- else -}} -http://{{ include "apollo.adminService.serviceName" .}}.{{ .Release.Namespace }}:{{ .Values.apolloService.adminService.service.port }}{{ .Values.apolloService.adminService.config.contextPath }} -{{- end -}} -{{- end -}} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt deleted file mode 100644 index 78ce9341..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/NOTES.txt +++ /dev/null @@ -1,32 +0,0 @@ -Meta service url for current release: -{{- if contains "NodePort" .Values.apolloService.configService.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
- You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} - echo {{ include "apollo.configService.serviceUrl" .}} - -For local test use: - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") - echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 -{{- end }} - -{{- if .Values.apolloService.configService.ingress.enabled }} - -Ingress: -{{- range $host := .Values.apolloService.configService.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} - {{- end }} -{{- end }} -{{- end }} - -Urls registered to meta service: -Config service: {{ include "apollo.configService.serviceUrl" .}} -Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml deleted file mode 100644 index d115a3c3..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -{{- /* -Copyright Drycc Community. 
-SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $adminServiceFullName := include "apollo.adminService.fullName" . }} -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ $adminServiceFullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.apolloService.adminService.replicaCount }} - selector: - matchLabels: - app: {{ $adminServiceFullName }} - {{- with .Values.apolloService.adminService.strategy }} - strategy: - {{- toYaml . | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ $adminServiceFullName }} - {{- with .Values.apolloService.adminService.annotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.apolloService.adminService.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - volumes: - - name: volume-configmap-{{ $adminServiceFullName }} - configMap: - name: {{ $adminServiceFullName }} - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: {{ .Values.apolloService.adminService.name }} - image: "{{ .Values.apolloService.adminService.image.repository }}:{{ .Values.apolloService.adminService.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.apolloService.adminService.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.apolloService.adminService.containerPort }} - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: {{ .Values.apolloService.adminService.config.profiles | quote }} - {{- range $key, $value := .Values.apolloService.adminService.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - volumeMounts: - - name: volume-configmap-{{ $adminServiceFullName }} - mountPath: /apollo-adminservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: {{ 
.Values.apolloService.adminService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.adminService.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.adminService.liveness.periodSeconds }} - readinessProbe: - httpGet: - path: {{ .Values.apolloService.adminService.config.contextPath }}/health - port: {{ .Values.apolloService.adminService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.adminService.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.adminService.readiness.periodSeconds }} - resources: - {{- toYaml .Values.apolloService.adminService.resources | nindent 12 }} - {{- with .Values.apolloService.adminService.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.apolloService.adminService.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.apolloService.adminService.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml deleted file mode 100644 index 1f5efcdf..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/ingress.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- if .Values.apolloService.adminService.ingress.enabled -}} -{{- $fullName := include "apollo.adminService.fullName" . 
-}} -{{- $svcPort := .Values.apolloService.adminService.service.port -}} -{{- $apiVersion := "extensions/v1beta1" -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1" }} -{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1beta1" }} -{{- end }} -apiVersion: {{ $apiVersion }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} - {{- with .Values.apolloService.adminService.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.apolloService.adminService.ingress.ingressClassName }} - ingressClassName: {{ .Values.apolloService.adminService.ingress.ingressClassName }} -{{- end }} -{{- if .Values.apolloService.adminService.ingress.tls }} - tls: - {{- range .Values.apolloService.adminService.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.apolloService.adminService.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - {{- if eq $apiVersion "networking.k8s.io/v1" }} - pathType: Prefix - {{- end }} - backend: - {{- if eq $apiVersion "networking.k8s.io/v1" }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml deleted file mode 100644 index d7f35e89..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/secret.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- /* -Copyright Drycc Community. 
-SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $adminServiceFullName := include "apollo.adminService.fullName" . }} -kind: Secret -apiVersion: v1 -metadata: - name: {{ $adminServiceFullName }} -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }} - spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }} - spring.datasource.password = {{ required "configdb.password is required!" .Values.apolloService.configdb.password }} - {{- if .Values.apolloService.adminService.config.contextPath }} - server.servlet.context-path = {{ .Values.apolloService.adminService.config.contextPath }} - {{- end }} -{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml deleted file mode 100644 index ff44f6cc..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/adminservice/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: {{ include "apollo.adminService.serviceName" . }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - type: {{ .Values.apolloService.adminService.service.type }} - ports: - - name: http - protocol: TCP - port: {{ .Values.apolloService.adminService.service.port }} - targetPort: {{ .Values.apolloService.adminService.service.targetPort }} - selector: - app: {{ include "apollo.adminService.fullName" . 
}} -{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt deleted file mode 100644 index 78ce9341..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/NOTES.txt +++ /dev/null @@ -1,32 +0,0 @@ -Meta service url for current release: -{{- if contains "NodePort" .Values.apolloService.configService.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.configService.fullName" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "LoadBalancer" .Values.apolloService.configService.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.configService.fullName" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.configService.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.apolloService.configService.service.port }}{{ .Values.apolloService.configService.config.contextPath }} -{{- else if contains "ClusterIP" .Values.apolloService.configService.service.type }} - echo {{ include "apollo.configService.serviceUrl" .}} - -For local test use: - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.configService.fullName" . 
}}" -o jsonpath="{.items[0].metadata.name}") - echo http://127.0.0.1:8080{{ .Values.apolloService.configService.config.contextPath }} - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080 -{{- end }} - -{{- if .Values.apolloService.configService.ingress.enabled }} - -Ingress: -{{- range $host := .Values.apolloService.configService.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.apolloService.configService.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} - {{- end }} -{{- end }} -{{- end }} - -Urls registered to meta service: -Config service: {{ include "apollo.configService.serviceUrl" .}} -Admin service: {{ include "apollo.adminService.serviceUrl" .}} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml deleted file mode 100644 index 947e8eb9..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $configServiceFullName := include "apollo.configService.fullName" . }} -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ $configServiceFullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.apolloService.configService.replicaCount }} - selector: - matchLabels: - app: {{ $configServiceFullName }} - {{- with .Values.apolloService.configService.strategy }} - strategy: - {{- toYaml . | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ $configServiceFullName }} - {{- with .Values.apolloService.configService.annotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.apolloService.configService.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: volume-configmap-{{ $configServiceFullName }} - configMap: - name: {{ $configServiceFullName }} - items: - - key: application-github.properties - path: application-github.properties - defaultMode: 420 - containers: - - name: {{ .Values.apolloService.configService.name }} - image: "{{ .Values.apolloService.configService.image.repository }}:{{ .Values.apolloService.configService.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.apolloService.configService.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.apolloService.configService.containerPort }} - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: {{ .Values.apolloService.configService.config.profiles | quote }} - {{- range $key, $value := .Values.apolloService.configService.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - volumeMounts: - - name: volume-configmap-{{ $configServiceFullName }} - mountPath: /apollo-configservice/config/application-github.properties - subPath: application-github.properties - livenessProbe: - tcpSocket: - port: {{ .Values.apolloService.configService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.configService.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.configService.liveness.periodSeconds }} - readinessProbe: - httpGet: - path: {{ .Values.apolloService.configService.config.contextPath }}/health - port: {{ .Values.apolloService.configService.containerPort }} - initialDelaySeconds: {{ .Values.apolloService.configService.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.apolloService.configService.readiness.periodSeconds }} - resources: - {{- toYaml .Values.apolloService.configService.resources | nindent 12 }} - {{- with .Values.apolloService.configService.nodeSelector }} - nodeSelector: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- with .Values.apolloService.configService.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.apolloService.configService.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml deleted file mode 100644 index 36fc5421..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/ingress.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- if .Values.apolloService.configService.ingress.enabled -}} -{{- $fullName := include "apollo.configService.fullName" . -}} -{{- $svcPort := .Values.apolloService.configService.service.port -}} -{{- $apiVersion := "extensions/v1beta1" -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1" }} -{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1beta1" }} -{{- end }} -apiVersion: {{ $apiVersion }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} - {{- with .Values.apolloService.configService.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.apolloService.configService.ingress.ingressClassName }} - ingressClassName: {{ .Values.apolloService.configService.ingress.ingressClassName }} -{{- end }} -{{- if .Values.apolloService.configService.ingress.tls }} - tls: - {{- range .Values.apolloService.configService.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . 
| quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.apolloService.configService.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - {{- if eq $apiVersion "networking.k8s.io/v1" }} - pathType: Prefix - {{- end }} - backend: - {{- if eq $apiVersion "networking.k8s.io/v1" }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml deleted file mode 100644 index 5224b51d..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/secret.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -{{- $configServiceFullName := include "apollo.configService.fullName" . }} -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{ $configServiceFullName }} -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://{{ .Values.apolloService.configdb.host }}:{{ .Values.apolloService.configdb.port }}/{{ .Values.apolloService.configdb.dbName }}{{ if .Values.apolloService.configdb.connectionStringProperties }}?{{ .Values.apolloService.configdb.connectionStringProperties }}{{ end }} - spring.datasource.username = {{ required "configdb.userName is required!" .Values.apolloService.configdb.userName }} - spring.datasource.password = {{ required "configdb.password is required!" 
.Values.apolloService.configdb.password }} - apollo.config-service.url = {{ include "apollo.configService.serviceUrl" .}} - apollo.admin-service.url = {{ include "apollo.adminService.serviceUrl" .}} - {{- if .Values.apolloService.configService.config.contextPath }} - server.servlet.context-path = {{ .Values.apolloService.configService.config.contextPath }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml deleted file mode 100644 index 9bcbb5e1..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/configservice/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.apolloService.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: {{ include "apollo.configService.serviceName" . }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - type: {{ .Values.apolloService.configService.service.type }} - ports: - - name: http - protocol: TCP - port: {{ .Values.apolloService.configService.service.port }} - targetPort: {{ .Values.apolloService.configService.service.targetPort }} - selector: - app: {{ include "apollo.configService.fullName" . }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt b/addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt deleted file mode 100644 index d49cfe0e..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/portal/NOTES.txt +++ /dev/null @@ -1,25 +0,0 @@ -Portal url for current release: -{{- if contains "NodePort" .Values.portal.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "apollo.portal.fullName" . 
}}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.portal.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "apollo.portal.fullName" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "apollo.portal.serviceName" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.portal.service.port }} -{{- else if contains "ClusterIP" .Values.portal.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "apollo.portal.fullName" . }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8070 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8070:8070 -{{- end }} - -{{- if .Values.portal.ingress.enabled }} - -Ingress: -{{- range $host := .Values.portal.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.portal.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml deleted file mode 100644 index 7c86b3e7..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/portal/deployment.yaml +++ /dev/null @@ -1,102 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -{{- $portalFullName := include "apollo.portal.fullName" . }} -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ $portalFullName }} - labels: - {{- include "apollo.labels" . 
| nindent 4 }} -spec: - replicas: {{ .Values.portal.replicaCount }} - selector: - matchLabels: - app: {{ $portalFullName }} - {{- with .Values.portal.strategy }} - strategy: - {{- toYaml . | nindent 4 }} - {{- end }} - template: - metadata: - labels: - app: {{ $portalFullName }} - {{- with .Values.portal.annotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.portal.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - volumes: - - name: secret-{{ $portalFullName }} - Secret: - name: {{ $portalFullName }} - items: - - key: application-github.properties - path: application-github.properties - - key: apollo-env.properties - path: apollo-env.properties - {{- range $fileName, $content := .Values.portal.config.files }} - - key: {{ $fileName }} - path: {{ $fileName }} - {{- end }} - defaultMode: 420 - containers: - - name: {{ .Values.portal.name }} - image: "{{ .Values.portal.image.repository }}:{{ .Values.portal.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.portal.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.portal.containerPort }} - protocol: TCP - env: - - name: SPRING_PROFILES_ACTIVE - value: {{ .Values.portal.config.profiles | quote }} - {{- range $key, $value := .Values.portal.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - volumeMounts: - - name: secret-{{ $portalFullName }} - mountPath: /apollo-portal/config/application-github.properties - subPath: application-github.properties - - name: secret-{{ $portalFullName }} - mountPath: /apollo-portal/config/apollo-env.properties - subPath: apollo-env.properties - {{- range $fileName, $content := .Values.portal.config.files }} - - name: secret-{{ $portalFullName }} - mountPath: /apollo-portal/config/{{ $fileName }} - subPath: {{ $fileName }} - {{- end }} - livenessProbe: - tcpSocket: - port: {{ .Values.portal.containerPort }} - initialDelaySeconds: {{ 
.Values.portal.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.portal.liveness.periodSeconds }} - readinessProbe: - httpGet: - path: {{ .Values.portal.config.contextPath }}/health - port: {{ .Values.portal.containerPort }} - initialDelaySeconds: {{ .Values.portal.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.portal.readiness.periodSeconds }} - resources: - {{- toYaml .Values.portal.resources | nindent 12 }} - {{- with .Values.portal.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.portal.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.portal.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml deleted file mode 100644 index b01dc05b..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/portal/ingress.yaml +++ /dev/null @@ -1,64 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -# -{{- if .Values.portal.ingress.enabled -}} -{{- $fullName := include "apollo.portal.fullName" . -}} -{{- $svcPort := .Values.portal.service.port -}} -{{- $apiVersion := "extensions/v1beta1" -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1" }} -{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}} -{{- $apiVersion = "networking.k8s.io/v1beta1" }} -{{- end }} -apiVersion: {{ $apiVersion }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "apollo.labels" . | nindent 4 }} - {{- with .Values.portal.ingress.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: -{{- if .Values.portal.ingress.ingressClassName }} - ingressClassName: {{ .Values.portal.ingress.ingressClassName }} -{{- end }} -{{- if .Values.portal.ingress.tls }} - tls: - {{- range .Values.portal.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.portal.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - {{- if eq $apiVersion "networking.k8s.io/v1" }} - pathType: Prefix - {{- end }} - backend: - {{- if eq $apiVersion "networking.k8s.io/v1" }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml deleted file mode 100644 index 32e8cfbb..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/portal/secret.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -{{- $portalFullName := include "apollo.portal.fullName" . }} -kind: Secret -apiVersion: v1 -metadata: - name: {{ $portalFullName }} -data: - application-github.properties: | - spring.datasource.url = jdbc:mysql://{{ .Values.portal.portaldb.host }}:{{ .Values.portal.portaldb.port }}/{{ .Values.portal.portaldb.dbName }}{{ if .Values.portal.portaldb.connectionStringProperties }}?{{ .Values.portal.portaldb.connectionStringProperties }}{{ end }} - spring.datasource.username = {{ required "portaldb.userName is required!" .Values.portal.portaldb.userName }} - spring.datasource.password = {{ required "portaldb.password is required!" 
.Values.portal.portaldb.password }} - {{- if .Values.portal.config.envs }} - apollo.portal.envs = {{ .Values.portal.config.envs }} - {{- end }} - {{- if .Values.portal.config.contextPath }} - server.servlet.context-path = {{ .Values.portal.config.contextPath }} - {{- end }} - apollo-env.properties: | - {{- if .Values.apolloService.enabled }} - {{ .Values.apolloService.meta }} = {{ include "apollo.configService.serviceName" . }}:{{ .Values.apolloService.configService.service.port }} - {{- end }} - {{- if .Values.portal.config.metaServers }} - {{- range $env, $address := .Values.portal.config.metaServers }} - {{ $env }}.meta = {{ $address }} - {{- end }} - {{- end }} -{{- range $fileName, $content := .Values.portal.config.files }} -{{ $fileName | indent 2 }}: | -{{ $content | indent 4 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml b/addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml deleted file mode 100644 index da8237d5..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/templates/portal/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- /* -Copyright Drycc Community. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.portal.enabled }} -kind: Service -apiVersion: v1 -metadata: - name: {{ include "apollo.portal.serviceName" . }} - labels: - {{- include "apollo.labels" . | nindent 4 }} -spec: - type: {{ .Values.portal.service.type }} - ports: - - name: http - protocol: TCP - port: {{ .Values.portal.service.port }} - targetPort: {{ .Values.portal.service.targetPort }} - selector: - app: {{ include "apollo.portal.fullName" . 
}} - sessionAffinity: {{ .Values.portal.service.sessionAffinity }} -{{- end }} diff --git a/addons/apollo-bk/2.3/chart/apollo/values.yaml b/addons/apollo-bk/2.3/chart/apollo/values.yaml deleted file mode 100644 index 7a00ff0e..00000000 --- a/addons/apollo-bk/2.3/chart/apollo/values.yaml +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright Drycc Community. -# SPDX-License-Identifier: APACHE-2.0 - -## @section Global parameters -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass - -## @param global.imageRegistry Global Docker image registry -## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) -## -global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - storageClass: "" - -## @section Common parameters - -## @param kubeVersion Override Kubernetes version -## -kubeVersion: "" -## @param nameOverride String to partially override common.names.fullname -## -nameOverride: "" -## @param fullnameOverride String to fully override common.names.fullname -## -fullnameOverride: "" -## @param commonLabels Labels to add to all deployed objects -## -commonLabels: {} -## @param commonAnnotations Annotations to add to all deployed objects -## -commonAnnotations: {} -## @param clusterDomain Kubernetes cluster domain name -## -clusterDomain: cluster.local -## @param extraDeploy Array of extra objects to deploy with the release -## -extraDeploy: [] -## @param namespaceOverride String to fully override common.names.namespace -## -namespaceOverride: "" - -## Enable diagnostic mode in the deployment -## -diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will 
be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment - ## - args: - - infinity - -portal: - enabled: true - name: apollo-portal - fullNameOverride: "" - replicaCount: 1 - containerPort: 8070 - image: - registry: registry.drycc.cc - repository: drycc-addons/apollo-portal - tag: "2.3" - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - service: - fullNameOverride: "" - port: 8070 - targetPort: 8070 - type: ClusterIP - sessionAffinity: ClientIP - ingress: - ingressClassName: null - enabled: false - annotations: {} - hosts: - - host: "" - paths: [] - tls: [] - liveness: - initialDelaySeconds: 100 - periodSeconds: 10 - readiness: - initialDelaySeconds: 30 - periodSeconds: 5 - # environment variables passed to the container, e.g. JAVA_OPTS - env: {} - strategy: {} - resources: {} - nodeSelector: {} - tolerations: [] - affinity: {} - annotations: {} - - config: - # spring profiles to activate - profiles: "github,auth" - # specify the env names, e.g. dev,pro - envs: "dev" - # specify the meta servers, e.g. - # dev: http://apollo-configservice-dev:8080 - # pro: http://apollo-configservice-pro:8080 - metaServers: "" - # specify the context path, e.g. /apollo - contextPath: "" - # extra config files for apollo-portal, e.g. 
application-ldap.yml - files: {} - - portaldb: - # apolloportaldb host - host: "" - port: 3306 - dbName: ApolloPortalDB - # apolloportaldb user name - userName: "1" - # apolloportaldb password - password: "1" - connectionStringProperties: characterEncoding=utf8 - - -## @section Apollo parameters - -apolloService: - enabled: true - meta: "dev" - configdb: - # apolloconfigdb host - host: "" - port: 3306 - dbName: ApolloConfigDB - # apolloconfigdb user name - userName: "1" - # apolloconfigdb password - password: "1" - connectionStringProperties: characterEncoding=utf8 - - configService: - name: apollo-configservice - fullNameOverride: "" - replicaCount: 2 - containerPort: 8080 - image: - repository: drycc-addons/apollo-configservice - tag: "2.3" - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - service: - fullNameOverride: "" - port: 8080 - targetPort: 8080 - type: ClusterIP - ingress: - ingressClassName: null - enabled: false - annotations: { } - hosts: - - host: "" - paths: [ ] - tls: [ ] - liveness: - initialDelaySeconds: 100 - periodSeconds: 10 - readiness: - initialDelaySeconds: 30 - periodSeconds: 5 - config: - # spring profiles to activate - profiles: "github,kubernetes" - # override apollo.config-service.url: config service url to be accessed by apollo-client - configServiceUrlOverride: "" - # override apollo.admin-service.url: admin service url to be accessed by apollo-portal - adminServiceUrlOverride: "" - # specify the context path, e.g. 
/apollo - contextPath: "" - # environment variables passed to the container, e.g. JAVA_OPTS - env: {} - strategy: {} - resources: {} - nodeSelector: {} - tolerations: [] - affinity: {} - annotations: {} - - adminService: - name: apollo-adminservice - fullNameOverride: "" - replicaCount: 2 - containerPort: 8090 - image: - registry: registry.drycc.cc - repository: drycc-addons/apollo-adminservice - tag: "2.3" - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - service: - fullNameOverride: "" - port: 8090 - targetPort: 8090 - type: ClusterIP - ingress: - ingressClassName: null - enabled: false - annotations: { } - hosts: - - host: "" - paths: [ ] - tls: [ ] - liveness: - initialDelaySeconds: 100 - periodSeconds: 10 - readiness: - initialDelaySeconds: 30 - periodSeconds: 5 - config: - # spring profiles to activate - profiles: "github,kubernetes" - # specify the context path, e.g. /apollo - contextPath: "" - # environment variables passed to the container, e.g. JAVA_OPTS - env: {} - strategy: {} - resources: {} - nodeSelector: {} - tolerations: [] - affinity: {} - annotations: {} - diff --git a/addons/apollo-bk/2.3/meta.yaml b/addons/apollo-bk/2.3/meta.yaml deleted file mode 100644 index edb96d83..00000000 --- a/addons/apollo-bk/2.3/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -name: apollo -version: 2 -id: 06653a76-126d-4c9d-a929-e4841185ab68 -description: "apollo." 
-displayName: "apollo" -metadata: - displayName: "apollo" - provider: - name: drycc - supportURL: https://www.apolloconfig.com/ - documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/apollo-adminservice -tags: apollo -bindable: true -instances_retrievable: true -bindings_retrievable: true -plan_updateable: true -allow_parameters: -- name: "portal.enabled" - required: false - description: "portal.enabled config for values.yaml" -- name: "portal.config" - required: false - description: "portal.config config for values.yaml" -- name: "portal.portaldb" - required: false - description: "portal.config config for values.yaml" -archive: false diff --git a/addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml b/addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml deleted file mode 100644 index e37ddd93..00000000 --- a/addons/apollo-bk/2.3/plans/standard-1c2g2w/bind.yaml +++ /dev/null @@ -1,43 +0,0 @@ -credential: - {{ if (eq .Values.service.type "LoadBalancer") }} - - name: EXTERNAL_WEB_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .status.loadBalancer.ingress[*].ip }' - {{- end }} - - - name: WEB_HOST - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.clusterIP }' - - - name: WEB_PORT - valueFrom: - serviceRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.name=="http")].port }' - - - name: USER - value: {{ .Values.auth.username }} - - {{- if (not .Values.auth.existingSecret) }} - - name: AIRFLOW_PASSWORD - valueFrom: - secretKeyRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .data.airflow-password }' - - - name: AIRFLOW_FERNET_KEY - valueFrom: - secretKeyRef: - name: {{ include "common.names.fullname" . }} - jsonpath: '{ .data.airflow-fernet-key }' - - - name: AIRFLOW_SECRET_KEY - valueFrom: - secretKeyRef: - name: {{ include "common.names.fullname" . 
}} - jsonpath: '{ .data.airflow-secret-key }' - {{- end }} diff --git a/addons/apollo-bk/2.3/plans/standard-1c2g2w/instance-schema.json b/addons/apollo-bk/2.3/plans/standard-1c2g2w/instance-schema.json deleted file mode 100644 index 66ebbaa0..00000000 --- a/addons/apollo-bk/2.3/plans/standard-1c2g2w/instance-schema.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "properties": { - "imagePullPolicy": { - "type": "string", - "enum": ["Always", "IfNotPresent", "Never"], - "default": "IfNotPresent", - "title": "Image pull policy" - } - } -} \ No newline at end of file diff --git a/addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml b/addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml deleted file mode 100644 index a10be35d..00000000 --- a/addons/apollo-bk/2.3/plans/standard-1c2g2w/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-1c2g2w" -id: 75f949c8-8366-4805-aa8b-553de0ec6c24 -description: "airflow standard-1c2g2w plan which limit resources 2 workers per worker 1 core memory size 2Gi." 
-displayName: "standard-1c2g2w" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml b/addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml deleted file mode 100644 index caeaba82..00000000 --- a/addons/apollo-bk/2.3/plans/standard-1c2g2w/values.yaml +++ /dev/null @@ -1,60 +0,0 @@ -## @param fullnameOverride String to fully override common.names.fullname template -## -fullnameOverride: hb-airflow-standard-1c2g2w - -## @section Airflow web parameters - -web: - ## @param web.replicaCount Number of Airflow web replicas - ## - replicaCount: 1 - ## Airflow web resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param web.resources.limits The resources limits for the Airflow web containers - ## @param web.resources.requests The requested resources for the Airflow web containers - ## - resources: - limits: - cpu: 1000m - memory: 2048Mi - requests: - cpu: 500m - memory: 1024Mi - -## @section Airflow scheduler parameters - -scheduler: - ## @param scheduler.replicaCount Number of scheduler replicas - ## - replicaCount: 1 - ## Airflow scheduler resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param scheduler.resources.limits The resources limits for the Airflow scheduler containers - ## @param scheduler.resources.requests The requested resources for the Airflow scheduler containers - ## - resources: - limits: - cpu: 1000m - memory: 2048Mi - requests: - cpu: 500m - memory: 1024Mi - -## @section Airflow worker parameters - -worker: - ## @param worker.replicaCount Number of Airflow worker replicas - ## - replicaCount: 2 - ## Airflow worker resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param worker.resources.limits The resources limits for the Airflow worker containers - ## @param worker.resources.requests The requested resources for the Airflow worker 
containers - ## - resources: - limits: - cpu: 1000m - memory: 2048Mi - requests: - cpu: 500m - memory: 1024Mi From e631b357036359b489ce6f774cb7983605878b67 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 4 Sep 2025 11:50:58 +0800 Subject: [PATCH 71/93] chore(addons): reset resource requests (#117) --- .../24/plans/standard-16c64g1000/values.yaml | 4 ++-- addons/clickhouse/24/plans/standard-2c4g20/values.yaml | 4 ++-- .../24/plans/standard-32c64g12000/values.yaml | 4 ++-- .../clickhouse/24/plans/standard-4c16g100/values.yaml | 4 ++-- .../clickhouse/24/plans/standard-8c32g500/values.yaml | 4 ++-- .../mongodb/7.0/plans/standard-16c64g400/values.yaml | 4 ++-- addons/mongodb/7.0/plans/standard-1c2g10/values.yaml | 4 ++-- addons/mongodb/7.0/plans/standard-2c4g20/values.yaml | 4 ++-- addons/mongodb/7.0/plans/standard-2c8g50/values.yaml | 4 ++-- .../mongodb/7.0/plans/standard-32c128g800/values.yaml | 4 ++-- addons/mongodb/7.0/plans/standard-4c16g100/values.yaml | 4 ++-- addons/mongodb/7.0/plans/standard-8c32g200/values.yaml | 4 ++-- .../8.0/plans/standard-16c64g400/values.yaml | 10 +++++----- .../8.0/plans/standard-2c4g20/values.yaml | 6 +++--- .../8.0/plans/standard-2c8g50/values.yaml | 6 +++--- .../8.0/plans/standard-32c128g800/values.yaml | 10 +++++----- .../8.0/plans/standard-4c16g100/values.yaml | 4 ++-- .../8.0/plans/standard-8c32g200/values.yaml | 10 +++++----- .../15/plans/standard-16c64g400/values.yaml | 4 ++-- .../15/plans/standard-2c4g20/values.yaml | 4 ++-- .../15/plans/standard-2c8g50/values.yaml | 4 ++-- .../15/plans/standard-32c128g800/values.yaml | 4 ++-- .../15/plans/standard-32c64g4000/values.yaml | 4 ++-- .../15/plans/standard-4c16g100/values.yaml | 4 ++-- .../15/plans/standard-8c32g200/values.yaml | 4 ++-- .../16/plans/standard-16c64g400/values.yaml | 4 ++-- .../16/plans/standard-2c4g20/values.yaml | 4 ++-- .../16/plans/standard-2c8g50/values.yaml | 4 ++-- .../16/plans/standard-32c128g800/values.yaml | 4 ++-- .../16/plans/standard-32c64g4000/values.yaml 
| 4 ++-- .../16/plans/standard-4c16g100/values.yaml | 4 ++-- .../16/plans/standard-8c32g200/values.yaml | 4 ++-- 32 files changed, 75 insertions(+), 75 deletions(-) diff --git a/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml b/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml index 96c4ccbd..3e25e3df 100644 --- a/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml +++ b/addons/clickhouse/24/plans/standard-16c64g1000/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 16000m memory: 64Gi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistence: size: 1000Gi diff --git a/addons/clickhouse/24/plans/standard-2c4g20/values.yaml b/addons/clickhouse/24/plans/standard-2c4g20/values.yaml index 0c5d38c4..16da5abd 100644 --- a/addons/clickhouse/24/plans/standard-2c4g20/values.yaml +++ b/addons/clickhouse/24/plans/standard-2c4g20/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 2000m memory: 4Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 100m + memory: 1Gi persistence: size: 20Gi diff --git a/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml b/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml index a933b9da..c14f97a5 100644 --- a/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml +++ b/addons/clickhouse/24/plans/standard-32c64g12000/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 32000m memory: 64Gi requests: - cpu: 8000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistence: size: 12000Gi diff --git a/addons/clickhouse/24/plans/standard-4c16g100/values.yaml b/addons/clickhouse/24/plans/standard-4c16g100/values.yaml index 39aa6b39..d8ef0c53 100644 --- a/addons/clickhouse/24/plans/standard-4c16g100/values.yaml +++ b/addons/clickhouse/24/plans/standard-4c16g100/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4000m memory: 16Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 100m + memory: 1Gi persistence: size: 100Gi diff --git a/addons/clickhouse/24/plans/standard-8c32g500/values.yaml 
b/addons/clickhouse/24/plans/standard-8c32g500/values.yaml index 117e2e58..b34de919 100644 --- a/addons/clickhouse/24/plans/standard-8c32g500/values.yaml +++ b/addons/clickhouse/24/plans/standard-8c32g500/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8000m memory: 32Gi requests: - cpu: 2000m - memory: 8Gi + cpu: 100m + memory: 1Gi persistence: size: 500Gi diff --git a/addons/mongodb/7.0/plans/standard-16c64g400/values.yaml b/addons/mongodb/7.0/plans/standard-16c64g400/values.yaml index 9ef94dff..400e61dc 100644 --- a/addons/mongodb/7.0/plans/standard-16c64g400/values.yaml +++ b/addons/mongodb/7.0/plans/standard-16c64g400/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 16000m memory: 64Gi requests: - cpu: 8000m - memory: 32Gi + cpu: 100m + memory: 1Gi persistence: size: 400Gi diff --git a/addons/mongodb/7.0/plans/standard-1c2g10/values.yaml b/addons/mongodb/7.0/plans/standard-1c2g10/values.yaml index 2f543d11..2e026ef2 100644 --- a/addons/mongodb/7.0/plans/standard-1c2g10/values.yaml +++ b/addons/mongodb/7.0/plans/standard-1c2g10/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 1000m memory: 2Gi requests: - cpu: 1000m - memory: 2Gi + cpu: 100m + memory: 1Gi persistence: size: 10Gi diff --git a/addons/mongodb/7.0/plans/standard-2c4g20/values.yaml b/addons/mongodb/7.0/plans/standard-2c4g20/values.yaml index 9b7336ab..20dd90d0 100644 --- a/addons/mongodb/7.0/plans/standard-2c4g20/values.yaml +++ b/addons/mongodb/7.0/plans/standard-2c4g20/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 2000m memory: 4Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 100m + memory: 1Gi persistence: size: 20Gi diff --git a/addons/mongodb/7.0/plans/standard-2c8g50/values.yaml b/addons/mongodb/7.0/plans/standard-2c8g50/values.yaml index e86171e5..0eaf31f4 100644 --- a/addons/mongodb/7.0/plans/standard-2c8g50/values.yaml +++ b/addons/mongodb/7.0/plans/standard-2c8g50/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 2000m memory: 8Gi requests: - cpu: 2000m - memory: 8Gi + cpu: 100m + memory: 1Gi 
persistence: size: 50Gi diff --git a/addons/mongodb/7.0/plans/standard-32c128g800/values.yaml b/addons/mongodb/7.0/plans/standard-32c128g800/values.yaml index 4c339766..7c95075e 100644 --- a/addons/mongodb/7.0/plans/standard-32c128g800/values.yaml +++ b/addons/mongodb/7.0/plans/standard-32c128g800/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 32000m memory: 128Gi requests: - cpu: 16000m - memory: 64Gi + cpu: 100m + memory: 1Gi persistence: size: 800Gi diff --git a/addons/mongodb/7.0/plans/standard-4c16g100/values.yaml b/addons/mongodb/7.0/plans/standard-4c16g100/values.yaml index 51c69c34..bd4a6497 100644 --- a/addons/mongodb/7.0/plans/standard-4c16g100/values.yaml +++ b/addons/mongodb/7.0/plans/standard-4c16g100/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 4000m memory: 16Gi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistence: size: 100Gi diff --git a/addons/mongodb/7.0/plans/standard-8c32g200/values.yaml b/addons/mongodb/7.0/plans/standard-8c32g200/values.yaml index 442f087d..c29f09d4 100644 --- a/addons/mongodb/7.0/plans/standard-8c32g200/values.yaml +++ b/addons/mongodb/7.0/plans/standard-8c32g200/values.yaml @@ -16,8 +16,8 @@ resources: cpu: 8000m memory: 32Gi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistence: size: 200Gi diff --git a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml index b7bfb0ea..2360e5f9 100644 --- a/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-16c64g400/values.yaml @@ -31,8 +31,8 @@ primary: cpu: 16000m memory: 64Gi requests: - cpu: 8000m - memory: 32Gi + cpu: 100m + memory: 1Gi ## @section Persistence parameters @@ -83,6 +83,6 @@ router: limits: cpu: 1600m memory: 2Gi - requests: - cpu: 1600m - memory: 2Gi \ No newline at end of file + requests: + cpu: 100m + memory: 1Gi \ No newline at end of file diff --git 
a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml index 20775ba1..7a075ef2 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml @@ -32,8 +32,8 @@ primary: cpu: 2000m memory: 4Gi requests: - cpu: 2000m - memory: 4Gi + cpu: 100m + memory: 1Gi ## @section Persistence parameters @@ -86,5 +86,5 @@ router: cpu: 500m memory: 512Mi requests: - cpu: 500m + cpu: 100m memory: 512Mi \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml index 98bc94c8..5be1ec90 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c8g50/values.yaml @@ -31,8 +31,8 @@ primary: cpu: 2000m memory: 8Gi requests: - cpu: 2000m - memory: 8Gi + cpu: 100m + memory: 1Gi ## @section Persistence parameters @@ -83,5 +83,5 @@ router: cpu: 500m memory: 512Mi requests: - cpu: 500m + cpu: 100m memory: 512Mi \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml index d854cec1..d76c4f3a 100644 --- a/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-32c128g800/values.yaml @@ -31,8 +31,8 @@ primary: cpu: 32000m memory: 128Gi requests: - cpu: 16000m - memory: 64Gi + cpu: 100m + memory: 1Gi ## @section Persistence parameters @@ -82,6 +82,6 @@ router: limits: cpu: 3200m memory: 4Gi - requests: - cpu: 3200m - memory: 4Gi \ No newline at end of file + requests: + cpu: 100m + memory: 1Gi \ No newline at end of file diff --git a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml index 6312e572..944047a0 100644 --- 
a/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-4c16g100/values.yaml @@ -32,8 +32,8 @@ primary: cpu: 4000m memory: 16Gi requests: - cpu: 4000m - memory: 12Gi + cpu: 100m + memory: 1Gi ## @section Persistence parameters diff --git a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml index 7c7f49a0..fb9a0c0c 100644 --- a/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-8c32g200/values.yaml @@ -31,8 +31,8 @@ primary: cpu: 8000m memory: 32Gi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi ## @section Persistence parameters @@ -83,6 +83,6 @@ router: limits: cpu: 800m memory: 1024Mi - requests: - cpu: 800m - memory: 1024Mi \ No newline at end of file + requests: + cpu: 100m + memory: 1Gi \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml index 8fe5e689..f4cd8ae8 100644 --- a/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml @@ -70,8 +70,8 @@ resources: memory: 64Gi hugepages-2Mi: 40Mi requests: - cpu: 8000m - memory: 32Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml index 27ea5c42..bedec78d 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml @@ -70,8 +70,8 @@ resources: memory: 4Gi hugepages-2Mi: 20Mi requests: - cpu: 2000m - memory: 4Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml 
b/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml index 1062740f..564731a7 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml @@ -72,8 +72,8 @@ resources: memory: 8Gi hugepages-2Mi: 20Mi requests: - cpu: 2000m - memory: 8Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml index a2f1bb41..3af68467 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml @@ -71,8 +71,8 @@ resources: memory: 128Gi hugepages-2Mi: 80Mi requests: - cpu: 16000m - memory: 64Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml index cdc3c494..da70e1c9 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml @@ -71,8 +71,8 @@ resources: memory: 64Gi hugepages-2Mi: 80Mi requests: - cpu: 16000m - memory: 32Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml index 3b5d6b45..6c91f192 100644 --- a/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml @@ -72,8 +72,8 @@ resources: memory: 16Gi hugepages-2Mi: 50Mi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml index 
e2484df5..dee10714 100644 --- a/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml @@ -71,8 +71,8 @@ resources: memory: 32Gi hugepages-2Mi: 60Mi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml index 8fe5e689..f4cd8ae8 100644 --- a/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml @@ -70,8 +70,8 @@ resources: memory: 64Gi hugepages-2Mi: 40Mi requests: - cpu: 8000m - memory: 32Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml index 27ea5c42..bedec78d 100644 --- a/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml @@ -70,8 +70,8 @@ resources: memory: 4Gi hugepages-2Mi: 20Mi requests: - cpu: 2000m - memory: 4Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml index 1062740f..564731a7 100644 --- a/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml @@ -72,8 +72,8 @@ resources: memory: 8Gi hugepages-2Mi: 20Mi requests: - cpu: 2000m - memory: 8Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml index a2f1bb41..3af68467 100644 --- a/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml +++ 
b/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml @@ -71,8 +71,8 @@ resources: memory: 128Gi hugepages-2Mi: 80Mi requests: - cpu: 16000m - memory: 64Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml index cdc3c494..da70e1c9 100644 --- a/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml @@ -71,8 +71,8 @@ resources: memory: 64Gi hugepages-2Mi: 80Mi requests: - cpu: 16000m - memory: 32Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml index 3b5d6b45..6c91f192 100644 --- a/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml @@ -72,8 +72,8 @@ resources: memory: 16Gi hugepages-2Mi: 50Mi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml index e2484df5..dee10714 100644 --- a/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml @@ -71,8 +71,8 @@ resources: memory: 32Gi hugepages-2Mi: 60Mi requests: - cpu: 4000m - memory: 16Gi + cpu: 100m + memory: 1Gi persistentVolume: enabled: true From 29b15ed2f007118e3c30ab9b4c0c70a1a9922325 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 4 Sep 2025 15:47:22 +0800 Subject: [PATCH 72/93] chore(mysql-cluster): fix typo (#118) --- addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml index 7a075ef2..ec36604e 100644 --- a/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml +++ b/addons/mysql-cluster/8.0/plans/standard-2c4g20/values.yaml @@ -32,8 +32,9 @@ primary: cpu: 2000m memory: 4Gi requests: - cpu: 100m - memory: 1Gi + cpu: 100m + memory: 1Gi + ## @section Persistence parameters From 9dac64bec571e59c7b5a28099f6b62f9b8418744 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 4 Sep 2025 16:35:14 +0800 Subject: [PATCH 73/93] chore(seaweedfs): pvc annotations config --- .../chart/seaweedfs/templates/filer/statefulset.yaml | 3 +++ .../seaweedfs/templates/master/statefulset.yaml | 3 +++ .../seaweedfs/templates/volume/statefulset.yaml | 6 ++++++ addons/seaweedfs/3/chart/seaweedfs/values.yaml | 12 ++++++++++++ 4 files changed, 24 insertions(+) diff --git a/addons/seaweedfs/3/chart/seaweedfs/templates/filer/statefulset.yaml b/addons/seaweedfs/3/chart/seaweedfs/templates/filer/statefulset.yaml index 793adf6c..7df70e0b 100644 --- a/addons/seaweedfs/3/chart/seaweedfs/templates/filer/statefulset.yaml +++ b/addons/seaweedfs/3/chart/seaweedfs/templates/filer/statefulset.yaml @@ -136,6 +136,9 @@ spec: volumeClaimTemplates: - metadata: name: datadir + {{- if .Values.filer.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.filer.persistence.annotations "context" $) | nindent 10 }} + {{- end }} spec: accessModes: [ "ReadWriteOnce" ] {{- if .Values.filer.persistence.storageClass }} diff --git a/addons/seaweedfs/3/chart/seaweedfs/templates/master/statefulset.yaml b/addons/seaweedfs/3/chart/seaweedfs/templates/master/statefulset.yaml index fd070965..bce819f9 100644 --- a/addons/seaweedfs/3/chart/seaweedfs/templates/master/statefulset.yaml +++ b/addons/seaweedfs/3/chart/seaweedfs/templates/master/statefulset.yaml @@ -122,6 +122,9 @@ spec: volumeClaimTemplates: - metadata: name: datadir 
+ {{- if .Values.master.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.annotations "context" $) | nindent 10 }} + {{- end }} spec: accessModes: [ "ReadWriteOnce" ] {{- if .Values.master.persistence.storageClass }} diff --git a/addons/seaweedfs/3/chart/seaweedfs/templates/volume/statefulset.yaml b/addons/seaweedfs/3/chart/seaweedfs/templates/volume/statefulset.yaml index 83d1a253..6e8a4250 100644 --- a/addons/seaweedfs/3/chart/seaweedfs/templates/volume/statefulset.yaml +++ b/addons/seaweedfs/3/chart/seaweedfs/templates/volume/statefulset.yaml @@ -148,6 +148,9 @@ spec: {{- if .Values.volume.persistence.meta.enabled }} - metadata: name: datadir-meta + {{- if .Values.volume.persistence.meta.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.volume.persistence.meta.annotations "context" $) | nindent 10 }} + {{- end }} spec: accessModes: [ "ReadWriteOnce" ] {{- if .Values.volume.persistence.meta.storageClass }} @@ -164,6 +167,9 @@ spec: {{- if .Values.volume.persistence.data.enabled }} - metadata: name: datadir-data + {{- if .Values.volume.persistence.data.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.volume.persistence.data.annotations "context" $) | nindent 10 }} + {{- end }} spec: accessModes: [ "ReadWriteOnce" ] {{- if .Values.volume.persistence.data.storageClass }} diff --git a/addons/seaweedfs/3/chart/seaweedfs/values.yaml b/addons/seaweedfs/3/chart/seaweedfs/values.yaml index ef54e961..27734800 100644 --- a/addons/seaweedfs/3/chart/seaweedfs/values.yaml +++ b/addons/seaweedfs/3/chart/seaweedfs/values.yaml @@ -196,6 +196,9 @@ master: enabled: false size: 5Gi storageClass: "" + ## @param annotations Persistent Volume Claim annotations + ## + annotations: {} garbageThreshold: 0.3 defaultReplication: "001" ## persistentVolumeClaimRetentionPolicy @@ -287,6 +290,9 @@ filer: enabled: false size: 5Gi storageClass: "" 
+ ## @param annotations Persistent Volume Claim annotations + ## + annotations: {} ## persistentVolumeClaimRetentionPolicy ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention ## @param persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet @@ -373,10 +379,16 @@ volume: enabled: false size: 5Gi storageClass: "" + ## @param annotations Persistent Volume Claim annotations + ## + annotations: {} data: enabled: false size: 5Gi storageClass: "" + ## @param annotations Persistent Volume Claim annotations + ## + annotations: {} compactionMBps: 50 ## persistentVolumeClaimRetentionPolicy ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention From 017d8502c49535e6d8b349d97290ccb3028bf33f Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 5 Sep 2025 15:05:02 +0800 Subject: [PATCH 74/93] chore(addons): add redis-cluster\flink plans --- .../1.17/plans/standard-4c10g5w/bind.yaml | 35 +++++++++++ .../standard-4c10g5w/instance-schema.json | 21 +++++++ .../1.17/plans/standard-4c10g5w/meta.yaml | 6 ++ .../1.17/plans/standard-4c10g5w/values.yaml | 62 +++++++++++++++++++ .../7.0/plans/standard-73728/bind.yaml | 37 +++++++++++ .../plans/standard-73728/instance-schema.json | 12 ++++ .../7.0/plans/standard-73728/meta.yaml | 6 ++ .../7.0/plans/standard-73728/values.yaml | 61 ++++++++++++++++++ 8 files changed, 240 insertions(+) create mode 100644 addons/flink/1.17/plans/standard-4c10g5w/bind.yaml create mode 100644 addons/flink/1.17/plans/standard-4c10g5w/instance-schema.json create mode 100644 addons/flink/1.17/plans/standard-4c10g5w/meta.yaml create mode 100644 addons/flink/1.17/plans/standard-4c10g5w/values.yaml create mode 100644 addons/redis-cluster/7.0/plans/standard-73728/bind.yaml create mode 100644 addons/redis-cluster/7.0/plans/standard-73728/instance-schema.json create mode 100644 
addons/redis-cluster/7.0/plans/standard-73728/meta.yaml create mode 100644 addons/redis-cluster/7.0/plans/standard-73728/values.yaml diff --git a/addons/flink/1.17/plans/standard-4c10g5w/bind.yaml b/addons/flink/1.17/plans/standard-4c10g5w/bind.yaml new file mode 100644 index 00000000..67083d0f --- /dev/null +++ b/addons/flink/1.17/plans/standard-4c10g5w/bind.yaml @@ -0,0 +1,35 @@ +credential: + {{- if (eq .Values.jobmanager.service.type "LoadBalancer") }} + - name: JOBMANAGER_EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "flink.jobmanager.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: JOBMANAGER_DOMAIN + value: {{ include "flink.jobmanager.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: JOBMANAGER_HOST + valueFrom: + serviceRef: + name: {{ include "flink.jobmanager.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: JOBMANAGER_PORT_HTTP + valueFrom: + serviceRef: + name: {{ include "flink.jobmanager.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="http")].port }' + + - name: JOBMANAGER_PORT_RPC + valueFrom: + serviceRef: + name: {{ include "flink.jobmanager.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-rpc")].port }' + + - name: JOBMANAGER_PORT_BLOB + valueFrom: + serviceRef: + name: {{ include "flink.jobmanager.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="tcp-blob")].port }' diff --git a/addons/flink/1.17/plans/standard-4c10g5w/instance-schema.json b/addons/flink/1.17/plans/standard-4c10g5w/instance-schema.json new file mode 100644 index 00000000..4a2dbf10 --- /dev/null +++ b/addons/flink/1.17/plans/standard-4c10g5w/instance-schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "taskmanager": { + "type": "object", + "properties": { + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "description": "persistence size", + "pattern": "^(?:1Ti|(?:[1-9]\\d{0,2}|100[0-9]|101\\d|102[0-4])Gi)$" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/addons/flink/1.17/plans/standard-4c10g5w/meta.yaml b/addons/flink/1.17/plans/standard-4c10g5w/meta.yaml new file mode 100644 index 00000000..46b348aa --- /dev/null +++ b/addons/flink/1.17/plans/standard-4c10g5w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c10g5w" +id: 1efdf417-6c42-46b4-9fba-6796419f9590 +description: "flink standard-4c10g5w plan which limit resources 4 cores 10G memory and 5 taskmanagers" +displayName: "standard-4c10g5w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/flink/1.17/plans/standard-4c10g5w/values.yaml b/addons/flink/1.17/plans/standard-4c10g5w/values.yaml new file mode 100644 index 00000000..8bdd8d87 --- /dev/null +++ b/addons/flink/1.17/plans/standard-4c10g5w/values.yaml @@ -0,0 +1,62 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-flink-standard-4c10g5w +## @section Jobmanager deployment parameters +## + +jobmanager: + ## @param jobmanager.replicaCount Number of Apache Flink Jobmanager replicas + ## + replicaCount: 1 + ## Apache Flink pods' resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## Minimum memory for development is 4GB and 2 CPU cores + ## Minimum memory for 
production is 8GB and 4 CPU cores + ## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html + resources: + limits: + cpu: 4 + memory: 10Gi + requests: + cpu: 500m + memory: 1Gi + ## Apache Flink jobmanager.service parameters + ## + service: + ## @param jobmanager.service.type Apache Flink service type + ## + type: ClusterIP +## @section TaskManager deployment parameters +## + +taskmanager: + ## @param taskmanager.replicaCount Number of Apache Flink replicas + ## + replicaCount: 5 + ## Apache Flink pods' resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## Minimum memory for development is 4GB and 2 CPU cores + ## Minimum memory for production is 8GB and 4 CPU cores + ## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html + resources: + limits: + cpu: 4 + memory: 10Gi + requests: + cpu: 500m + memory: 1Gi + ## @param taskmanager.extraEnvVars Extra environment variables to be set on flink container + extraEnvVars: + # taskmanager.numberOfTaskSlots + - name: FLINK_TASK_MANAGER_NUMBER_OF_TASK_SLOTS + value: "4" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param taskmanager.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param taskmanager.persistence.size Persistent Volume Size + ## + size: 8Gi \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-73728/bind.yaml b/addons/redis-cluster/7.0/plans/standard-73728/bind.yaml new file mode 100644 index 00000000..a41f0cb8 --- /dev/null +++ b/addons/redis-cluster/7.0/plans/standard-73728/bind.yaml @@ -0,0 +1,37 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-redis")].port }' + + - name: PROXY_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-proxy")].port }' + + {{- if and .Values.usePassword (not .Values.existingSecret) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.redis-password }' + {{- end }} diff --git a/addons/redis-cluster/7.0/plans/standard-73728/instance-schema.json b/addons/redis-cluster/7.0/plans/standard-73728/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/redis-cluster/7.0/plans/standard-73728/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/redis-cluster/7.0/plans/standard-73728/meta.yaml b/addons/redis-cluster/7.0/plans/standard-73728/meta.yaml new file mode 100644 index 00000000..9ff2f38a --- /dev/null +++ b/addons/redis-cluster/7.0/plans/standard-73728/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-73728" +id: 66e78b91-c7da-4bb1-a058-6e3d2e109746 +description: "Redis cluster standard-73728 plan which limit resources memory size 73728Mi." 
+displayName: "standard-73728" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/redis-cluster/7.0/plans/standard-73728/values.yaml b/addons/redis-cluster/7.0/plans/standard-73728/values.yaml new file mode 100644 index 00000000..43bf6843 --- /dev/null +++ b/addons/redis-cluster/7.0/plans/standard-73728/values.yaml @@ -0,0 +1,61 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "hb-redis-cluster-standard-73728" + +## io-thread +ioThread: + enabled: true + doReads: "yes" + counts: 2 + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.path Path to mount the volume at, to use other images Redis® images. + ## + path: /drycc/redis/data + accessModes: + - ReadWriteOnce + ## @param persistence.size Size of data volume + ## + size: 128Gi + +## @section Redis® statefulset parameters +## + +redis: + ## Redis® resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param redis.resources.limits The resources limits for the container + ## @param redis.resources.requests The requested resources for the container + ## + resources: + limits: + cpu: 4 + memory: 72Gi + requests: + cpu: 1 + memory: 8Gi + +## @section Proxy® statefulset parameters +## +proxy: + ## max-procs + maxProcs: 4 + ## backend-init-connections + backendInitConnections: 10 + ## backend-idle-connections + backendIdleConnections: 300 + ## Proxy® resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the container + ## @param proxy.resources.requests The requested resources for the container + ## + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 500m + memory: 1Gi From 0eb7b71dd29a87f693269f8ab0d6ab6c00d6ac5b Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 8 Sep 
2025 19:13:51 +0800 Subject: [PATCH 75/93] fix(pvc): annotations nindent --- .../airflow/2/chart/airflow/templates/worker/statefulset.yaml | 2 +- .../airflow/3/chart/airflow-3/templates/worker/statefulset.yaml | 2 +- .../1.17/chart/flink/templates/taskmanager/statefulset.yaml | 2 +- .../2.10/chart/opensearch/templates/data/statefulset.yaml | 2 +- .../2.10/chart/opensearch/templates/master/statefulset.yaml | 2 +- addons/spark/3.4/chart/spark/templates/statefulset-master.yaml | 2 +- addons/spark/3.4/chart/spark/templates/statefulset-worker.yaml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/addons/airflow/2/chart/airflow/templates/worker/statefulset.yaml b/addons/airflow/2/chart/airflow/templates/worker/statefulset.yaml index 9fa960ab..a1202d6e 100644 --- a/addons/airflow/2/chart/airflow/templates/worker/statefulset.yaml +++ b/addons/airflow/2/chart/airflow/templates/worker/statefulset.yaml @@ -274,7 +274,7 @@ spec: name: "data" {{- if or .Values.worker.persistence.annotations .Values.commonAnnotations }} {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.persistence.annotations .Values.commonAnnotations ) "context" . 
) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} {{- end }} {{- if .Values.commonLabels }} labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} diff --git a/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml b/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml index 44fb1376..166f284a 100644 --- a/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml +++ b/addons/airflow/3/chart/airflow-3/templates/worker/statefulset.yaml @@ -335,7 +335,7 @@ spec: name: "data" {{- if or .Values.worker.persistence.annotations .Values.commonAnnotations }} {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.persistence.annotations .Values.commonAnnotations ) "context" . ) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} {{- end }} {{- if .Values.commonLabels }} labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} diff --git a/addons/flink/1.17/chart/flink/templates/taskmanager/statefulset.yaml b/addons/flink/1.17/chart/flink/templates/taskmanager/statefulset.yaml index dcbccc8d..b149f27a 100644 --- a/addons/flink/1.17/chart/flink/templates/taskmanager/statefulset.yaml +++ b/addons/flink/1.17/chart/flink/templates/taskmanager/statefulset.yaml @@ -185,7 +185,7 @@ spec: name: "data" {{- if or .Values.taskmanager.persistence.annotations .Values.commonAnnotations }} {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.taskmanager.persistence.annotations .Values.commonAnnotations ) "context" . 
) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} {{- end }} {{- if .Values.commonLabels }} labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} diff --git a/addons/opensearch/2.10/chart/opensearch/templates/data/statefulset.yaml b/addons/opensearch/2.10/chart/opensearch/templates/data/statefulset.yaml index 527bc948..5e878063 100644 --- a/addons/opensearch/2.10/chart/opensearch/templates/data/statefulset.yaml +++ b/addons/opensearch/2.10/chart/opensearch/templates/data/statefulset.yaml @@ -337,7 +337,7 @@ spec: name: "data" {{- if or .Values.data.persistence.annotations .Values.commonAnnotations }} {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.data.persistence.annotations .Values.commonAnnotations ) "context" . 
) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} {{- end }} {{- if .Values.commonLabels }} labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} diff --git a/addons/opensearch/2.10/chart/opensearch/templates/master/statefulset.yaml b/addons/opensearch/2.10/chart/opensearch/templates/master/statefulset.yaml index e4b18de2..86255785 100644 --- a/addons/opensearch/2.10/chart/opensearch/templates/master/statefulset.yaml +++ b/addons/opensearch/2.10/chart/opensearch/templates/master/statefulset.yaml @@ -349,7 +349,7 @@ spec: name: "data" {{- if or .Values.master.persistence.annotations .Values.commonAnnotations }} {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.persistence.annotations .Values.commonAnnotations ) "context" . ) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} {{- end }} {{- if .Values.commonLabels }} labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} diff --git a/addons/spark/3.4/chart/spark/templates/statefulset-master.yaml b/addons/spark/3.4/chart/spark/templates/statefulset-master.yaml index 2235bd11..8b9888eb 100644 --- a/addons/spark/3.4/chart/spark/templates/statefulset-master.yaml +++ b/addons/spark/3.4/chart/spark/templates/statefulset-master.yaml @@ -364,7 +364,7 @@ spec: name: "work-data" {{- if or .Values.master.persistence.annotations .Values.commonAnnotations }} {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.persistence.annotations .Values.commonAnnotations ) "context" . 
) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} {{- end }} {{- if .Values.commonLabels }} labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} diff --git a/addons/spark/3.4/chart/spark/templates/statefulset-worker.yaml b/addons/spark/3.4/chart/spark/templates/statefulset-worker.yaml index d2291c0b..f880bf9e 100644 --- a/addons/spark/3.4/chart/spark/templates/statefulset-worker.yaml +++ b/addons/spark/3.4/chart/spark/templates/statefulset-worker.yaml @@ -388,7 +388,7 @@ spec: name: "work-data" {{- if or .Values.worker.persistence.annotations .Values.commonAnnotations }} {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.worker.persistence.annotations .Values.commonAnnotations ) "context" . ) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $) | nindent 10 }} {{- end }} {{- if .Values.commonLabels }} labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} From 9906ed7f75c357ee643b6a94dd2e7639506dbd9e Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 25 Sep 2025 16:40:57 +0800 Subject: [PATCH 76/93] chore(addons): add postgresql-cluster-17 --- addons/index.yaml | 14 +- .../17/chart/postgresql-cluster-17/Chart.yaml | 28 ++ .../17/chart/postgresql-cluster-17/README.md | 166 +++++++ .../postgresql-cluster-17/templates/NOTES.txt | 25 + .../templates/_helpers.tpl | 219 +++++++++ .../templates/cm-backup.yaml | 16 + .../templates/cm-logicalbackup .yaml | 19 + .../templates/cm-patroni.yaml | 20 + .../templates/cm-postgresql.yaml | 18 + .../templates/cronjob.yaml | 43 ++ 
.../templates/logicalbackup-cronjob.yaml | 69 +++ .../templates/networkpolicy.yaml | 54 +++ .../postgresql-cluster-17/templates/role.yaml | 49 ++ .../templates/rolebinding.yaml | 19 + .../postgresql-cluster-17/templates/sec.yaml | 18 + .../templates/serviceaccount.yaml | 12 + .../templates/statefulset.yaml | 273 +++++++++++ .../templates/svc-config.yaml | 11 + .../templates/svc-master.yaml | 24 + .../templates/svc-metrics.yaml | 32 ++ .../templates/svc-relp.yaml | 26 ++ .../postgresql-cluster-17/templates/svc.yaml | 18 + .../chart/postgresql-cluster-17/values.yaml | 440 ++++++++++++++++++ addons/postgresql-cluster/17/meta.yaml | 30 ++ .../17/plans/standard-16c64g400/bind.yaml | 41 ++ .../standard-16c64g400/instance-schema.json | 12 + .../17/plans/standard-16c64g400/meta.yaml | 6 + .../17/plans/standard-16c64g400/values.yaml | 81 ++++ .../17/plans/standard-2c4g20/bind.yaml | 41 ++ .../standard-2c4g20/instance-schema.json | 12 + .../17/plans/standard-2c4g20/meta.yaml | 6 + .../17/plans/standard-2c4g20/values.yaml | 81 ++++ .../17/plans/standard-2c8g50/bind.yaml | 41 ++ .../standard-2c8g50/instance-schema.json | 12 + .../17/plans/standard-2c8g50/meta.yaml | 6 + .../17/plans/standard-2c8g50/values.yaml | 83 ++++ .../17/plans/standard-32c128g800/bind.yaml | 41 ++ .../standard-32c128g800/instance-schema.json | 12 + .../17/plans/standard-32c128g800/meta.yaml | 6 + .../17/plans/standard-32c128g800/values.yaml | 82 ++++ .../17/plans/standard-32c64g4000/bind.yaml | 41 ++ .../standard-32c64g4000/instance-schema.json | 12 + .../17/plans/standard-32c64g4000/meta.yaml | 6 + .../17/plans/standard-32c64g4000/values.yaml | 82 ++++ .../17/plans/standard-4c16g100/bind.yaml | 41 ++ .../standard-4c16g100/instance-schema.json | 12 + .../17/plans/standard-4c16g100/meta.yaml | 6 + .../17/plans/standard-4c16g100/values.yaml | 83 ++++ .../17/plans/standard-8c32g200/bind.yaml | 41 ++ .../standard-8c32g200/instance-schema.json | 12 + .../17/plans/standard-8c32g200/meta.yaml | 6 + 
.../17/plans/standard-8c32g200/values.yaml | 82 ++++ 52 files changed, 2624 insertions(+), 6 deletions(-) create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/Chart.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/README.md create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/NOTES.txt create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/_helpers.tpl create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-backup.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-logicalbackup .yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-patroni.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-postgresql.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cronjob.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/logicalbackup-cronjob.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/networkpolicy.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/role.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/rolebinding.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/sec.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/serviceaccount.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-config.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-master.yaml create mode 100644 
addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-metrics.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-relp.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc.yaml create mode 100644 addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml create mode 100644 addons/postgresql-cluster/17/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-16c64g400/instance-schema.json create mode 100644 addons/postgresql-cluster/17/plans/standard-16c64g400/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-2c4g20/instance-schema.json create mode 100644 addons/postgresql-cluster/17/plans/standard-2c4g20/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-2c8g50/instance-schema.json create mode 100644 addons/postgresql-cluster/17/plans/standard-2c8g50/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-32c128g800/instance-schema.json create mode 100644 addons/postgresql-cluster/17/plans/standard-32c128g800/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-32c64g4000/instance-schema.json create mode 100644 
addons/postgresql-cluster/17/plans/standard-32c64g4000/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-4c16g100/instance-schema.json create mode 100644 addons/postgresql-cluster/17/plans/standard-4c16g100/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-8c32g200/instance-schema.json create mode 100644 addons/postgresql-cluster/17/plans/standard-8c32g200/meta.yaml create mode 100644 addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index ef1f050f..2e8e5ea4 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -47,6 +47,8 @@ entries: description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." - version: 16 description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." + - version: 17 + description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." seaweedfs: - version: 3 description: "SeaweedFS is a fast distributed storage system for blobs, objects, files, and data lake, for billions of files." @@ -62,24 +64,24 @@ entries: pmm: - version: 2.41 description: "Percona Monitoring and Management: an open source database monitoring, observability and management tool ." - mongodb: + mongodb: - version: 7.0 description: "MongoDB is a document database designed for ease of application development and scaling." 
- clickhouse: + clickhouse: - version: 24 description: "ClickHouse is the fastest and most resource efficient open-source database for real-time apps and analytics." - kvrocks: + kvrocks: - version: 2.8 description: "Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol." - version: "2.10" description: "Apache Kvrocks is a distributed key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol." - yugabytedb: + yugabytedb: - version: 2024 description: "YugabyteDB is a high-performance distributed SQL database for powering global, internet-scale applications. " - nessie: + nessie: - version: "0.103" description: "Transactional Catalog for Data Lakes with Git-like semantics . " - lakefs: + lakefs: - version: "1.52" description: "LakeFS provides version control over the data lake, and uses Git-like semantics to create and access those versions. If you know git, you’ll be right at home with lakeFS. " victoriametrics: diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/Chart.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/Chart.yaml new file mode 100644 index 00000000..c775a086 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: "17" +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.1 +description: PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. 
+engine: gotpl +home: https://github.com/drycc/charts/tree/master/drycc/postgresql +icon: https://drycc.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png +keywords: + - postgresql + - postgres + - database + - sql + - replication + - cluster + - patroni +maintainers: + - email: zhang.eamon@hotmail.com + name: zhangeamon +name: postgresql +sources: + - https://github.com/drycc-addons/ + - https://www.postgresql.org/ +version: "17.6" diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/README.md b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/README.md new file mode 100644 index 00000000..c407603f --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/README.md @@ -0,0 +1,166 @@ + +# Postgresql cluster addons +## Plans + +View and choose the service resource specifications you need. +``` + # drycc resources:plans postgresql-cluster +``` +### Resource specification list +| Resource Specification | Cores | MEMORY | Storage SIZE | +| :---: | :---: | :---: | :---: | +| standard-10 | 1C | 2G | 10G | +| standard-20 | 2C | 4G | 20G | +| standard-50 | 2C | 8G | 50G | +| standard-100 | 4C | 16G | 100G | +| standard-200 | 8C | 32G | 200G | +| standard-400 | 16C | 64G | 400G | +| standard-800 | 32C | 128G | 800G | + +In order to obtain a better experience, it is recommended not to exceed 80% usage of resource utilization for a long period of time. If there is a need for larger resource scale, please apply for private customization. 
+
+## Create Postgresql Cluster Service instance
+
+- Create Postgresql service
+```
+# drycc resources:create postgresql-cluster:standard-10 `my_pg_001`
+```
+- View service status
+```
+# drycc resources:describe `my_pg_001`
+```
+- Bind service
+```
+# drycc resources:bind `my_pg_001`
+```
+- View resource status
+```
+# drycc resources:describe `my_pg_001`
+```
+
+## Create Service with values file
+
+`vim values.yaml`
+```
+# create or update pg instance template yaml
+networkPolicy.allowNamespaces:
+  - mx-test1
+service.type: ClusterIP
+metrics.enabled: true
+backup:
+  # whether BackUP should be enabled
+  enabled: true
+  # Cron schedule for doing base backups
+  scheduleCronJob: "20 0 * * 0"
+  # Amount of base backups to retain
+  retainBackups: 2
+  s3:
+    awsAccessKeyID: ""
+    awsSecretAccessKey: ""
+    walGS3Prefix: "s3://xx"
+    awsEndpoint: "http://xxxx:9000"
+    awsS3ForcePathStyle: "true"
+    awsRegion: dx-1
+```
+```
+ drycc resources:create postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml
+```
+
+## Update Service
+### Create app user and database
+
+- Login database web with admin user & password
+
+- Change administrator initial password
+```
+ALTER USER administrator WITH ENCRYPTED PASSWORD 'newpassword';
+```
+- View total connections number in postgresql cluster;
+```
+show max_connections ;
+```
+- CREATE APP USER
+```
+CREATE USER `myuser` WITH CONNECTION LIMIT `conn limit` LOGIN ENCRYPTED PASSWORD 'password';
+GRANT `myuser` to administrator ;
+```
+- CREATE APP DATABASE
+```
+CREATE DATABASE `mydb` OWNER `myuser`;
+
+```
+- CREATE EXTENSIONS
+```
+CREATE EXTENSION pg_buffercache;
+```
+
+### Network Access
+
+Default access allow policy: only namespace scope.
+
+- allow `mx-test1` namespace access
+
+`vim values.yaml`
+```
+networkPolicy.allowNamespaces:
+  - mx-test1
+```
+```
+drycc resources:update postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml
+```
+
+ - Assign external network IP address
+
+`vim values.yaml`
+```
+ service.type: LoadBalancer
+```
+```
+drycc resources:update postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml
+```
+- View resource status
+```
+# drycc resources:describe `my_pg_001`
+```
+
+ ### Manage backups of your data `Very important`
+
+`Strongly recommend enabling this feature.`
+`Strongly recommend enabling this feature.`
+`Strongly recommend enabling this feature.`
+
+PG data backup uses S3 as the backend store. Choose an independent storage space `outside of the current environment` as your backup space.
+
+`vim values.yaml`
+```
+backup:
+  # whether BackUP should be enabled
+  enabled: true
+  # Cron schedule for doing base backups
+  scheduleCronJob: "20 0 * * 0"
+  # Amount of base backups to retain
+  retainBackups: 2
+  s3:
+    awsAccessKeyID: DO9l771LqiwZkhhz
+    awsSecretAccessKey: R3Dv0NEmJBo8JFdn1q8jz49ArWwpDjFn
+    walGS3Prefix: mx-test
+```
+```
+drycc resources:update postgresql-cluster:standard-10 `my_pg_001` -f ./values.yaml
+```
+
+You can modify multiple settings at once; there is no need to modify part of it each time.
+ + +## Destroy Service + +- Unbind service first +``` +# drycc resources:unbind `my_pg_001` +``` +- Destroy service +``` +# drycc resources:destroy `my_pg_001` +``` +# 修改pg在容器中分配的动态共享内存不足的问题 +https://www.cnblogs.com/daniel-hutao/p/17903993.html \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/NOTES.txt b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/NOTES.txt new file mode 100644 index 00000000..22a4f2d2 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/NOTES.txt @@ -0,0 +1,25 @@ +Patroni can be accessed via port 5432 on the following DNS name from within your cluster: +{{ template "patroni.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To get your password for superuser run: + + # superuser password + PGPASSWORD_SUPERUSER=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "patroni.fullname" . }} -o jsonpath="{.data.password-superuser}" | base64 --decode) + + # admin password + PGPASSWORD_ADMIN=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "patroni.fullname" . }} -o jsonpath="{.data.password-admin}" | base64 --decode) + +To connect to your database: + +1. Run a postgres pod and connect using the psql cli: + # login as superuser + kubectl run -i --tty --rm psql --image=postgres \ + --env "PGPASSWORD=$PGPASSWORD_SUPERUSER" \ + --command -- psql -U postgres \ + -h {{ template "patroni.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local postgres + + # login as admin + kubectl run -i -tty --rm psql --image=postgres \ + --env "PGPASSWORD=$PGPASSWORD_ADMIN" \ + --command -- psql -U admin \ + -h {{ template "patroni.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local postgres diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/_helpers.tpl b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/_helpers.tpl new file mode 100644 index 00000000..d5876632 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/_helpers.tpl @@ -0,0 +1,219 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "patroni.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "patroni.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "patroni.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use. +*/}} +{{- define "patroni.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "patroni.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a cronjob object should be created for Postgresql HA patroni ## TODO feature +*/}} +{{- define "patroni.createCronJob" -}} +{{- if and .Values.backup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a cronjob object should be created for Postgresql HA patroni ## TODO feature +*/}} +{{- define "patroni.createLogicalBackupCronJob" -}} +{{- if and .Values.logicalbackup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for Postgresql HA patroni +*/}} +{{- define "patroni.createConfigmap" -}} +{{- if and .Values.preInitScript }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Create patroni envs. +*/}} +{{- define "patroni.envs" }} +{{- if .Values.kubernetes.configmaps.enable }} +- name: KUBERNETES_USE_CONFIGMAPS + value: "true" +{{- end }} +{{- if .Values.kubernetes.endpoints.enable }} +- name: PATRONI_KUBERNETES_USE_ENDPOINTS + value: 'true' +{{- end }} +- name: PATRONI_KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP +- name: PATRONI_KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +- name: PATRONI_KUBERNETES_BYPASS_API_SERVICE + value: 'true' +- name: PATRONI_KUBERNETES_LABELS + value: '{application: {{ template "patroni.fullname" . }},release: {{ .Release.Name }},cluster-name: {{ template "patroni.fullname" . }}}' +- name: PATRONI_SUPERUSER_USERNAME + value: postgres +- name: PATRONI_SUPERUSER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser +- name: PATRONI_REPLICATION_USERNAME + value: standby +- name: PATRONI_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . 
}} + key: password-replication +- name: PATRONI_REWIND_USERNAME + value: rewinder +- name: PATRONI_REWIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-rewind +- name: ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: admin-user +- name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: admin-password +- name: PATRONI_SCOPE + value: {{ template "patroni.fullname" . }} +- name: PATRONI_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name +- name: PATRONI_POSTGRESQL_DATA_DIR + value: "{{ .Values.persistentVolume.mountPath }}/data" +- name: PGDATA + value: "{{ .Values.persistentVolume.mountPath }}/data" +- name: PATRONI_POSTGRESQL_PGPASS + value: /tmp/pgpass +- name: PATRONI_POSTGRESQL_LISTEN + value: '0.0.0.0:5432' +- name: PATRONI_RESTAPI_LISTEN + value: '0.0.0.0:8008' +{{- end -}} + +{{/* +Return true if a configmap object should be created for PG backup. +*/}} +{{- define "backup.createConfigmap" -}} +{{- if and .Values.backup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Generate random password +*/}} + +{{/* +Get the super user password ; +*/}} +{{- define "credentials.superuserValue" }} +{{- if .Values.credentials.superuser }} + {{- .Values.credentials.superuser -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "password-superuser") -}} +{{- end -}} +{{- end }} + +{{/* +Get the rewind password ; +*/}} +{{- define "credentials.rewindValue" }} +{{- if .Values.credentials.rewind }} + {{- .Values.credentials.rewind -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) 
"Length" 10 "Key" "password-rewind") -}} +{{- end -}} +{{- end }} + +{{/* +Get the replication password ; +*/}} +{{- define "credentials.replicationValue" }} +{{- if .Values.credentials.replication }} + {{- .Values.credentials.replication -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "password-replication") -}} +{{- end -}} +{{- end }} + +{{/* +Get the administrator password ; +*/}} +{{- define "adminRole.passwordValue" }} +{{- if .Values.adminRole.password }} + {{- .Values.adminRole.password -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "admin-password") -}} +{{- end -}} +{{- end }} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} +{{- $len := (default 16 .Length) | int -}} +{{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} +{{- if $obj }} +{{- index $obj .Key | b64dec -}} +{{- else -}} +{{- randAlphaNum $len -}} +{{- end -}} +{{- end }} + diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-backup.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-backup.yaml new file mode 100644 index 00000000..fdc62197 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-backup.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-backup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + backup.env: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.backupEnv "context" $ ) | nindent 4 }} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-logicalbackup .yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-logicalbackup .yaml new file mode 100644 index 00000000..8de61100 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-logicalbackup .yaml @@ -0,0 +1,19 @@ +{{- if (include "patroni.createLogicalBackupCronJob" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-logicalbackup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + logicalbackup.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.logicalbackupScript "context" $ ) | nindent 4 }} + +{{- end }} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-patroni.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-patroni.yaml new file mode 100644 index 00000000..ad4b5849 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-patroni.yaml @@ -0,0 +1,20 @@ +{{- if (include "patroni.createConfigmap" .) 
}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-patroni + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + pre_init.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.preInitScript "context" $ ) | nindent 4 }} + post_init.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.postInitScript "context" $ ) | nindent 4 }} +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-postgresql.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-postgresql.yaml new file mode 100644 index 00000000..8aba698a --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cm-postgresql.yaml @@ -0,0 +1,18 @@ +{{- if (include "patroni.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-postgresql + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom_conf.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.postgresql.config "context" $ ) | nindent 4 }} +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cronjob.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cronjob.yaml new file mode 100644 index 00000000..495dfa7b --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/cronjob.yaml @@ -0,0 +1,43 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- if (include "patroni.createCronJob" .) }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ include "patroni.fullname" . }}-backup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + schedule: "{{ .Values.backup.scheduleCronJob }}" + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: {{ .Chart.Name }}-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /usr/bin/env + - bash + - -c + - | + curl "http://${REPLHOST}:9000/pg_backup" + env: + - name: REPLHOST + value: {{ include "patroni.fullname" . }}-repl +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/logicalbackup-cronjob.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/logicalbackup-cronjob.yaml new file mode 100644 index 00000000..071b9bd9 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/logicalbackup-cronjob.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- if (include "patroni.createLogicalBackupCronJob" .) }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ include "patroni.fullname" . }}-logicalbackup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + schedule: "{{ .Values.logicalbackup.scheduleCronJob }}" + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: {{ .Chart.Name }}-logicalbackup + image: "{{ .Values.logicalbackupImages.repository }}:{{ .Values.logicalbackupImages.tag }}" + imagePullPolicy: {{ .Values.logicalbackupImages.pullPolicy | quote }} + command: + - /usr/bin/env + - bash + - -c + - | + sh /opt/drycc/logicalbackup/logicalbackup.sh + env: + - name: PGHOST + value: {{ include "patroni.fullname" . }}-repl + - name: PGPORT + value: "5432" + - name: PGUSER + value: postgres + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser + - name: MINIO_BUCKET + value: {{ .Values.logicalbackup.minio.bucket }} + - name: MINIO_HOST + value: {{ .Values.logicalbackup.minio.endpoint }} + - name: MINIO_ACCESS_KEY + value: {{ .Values.logicalbackup.minio.access_key }} + - name: MINIO_SECRET_KEY + value: {{ .Values.logicalbackup.minio.secret_key }} + + volumeMounts: + - mountPath: "/opt/drycc/logicalbackup/" + name: logicalbackup-config + + volumes: + - name: logicalbackup-config + configMap: + name: {{ template "common.names.fullname" . 
}}-logicalbackup +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/networkpolicy.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/networkpolicy.yaml new file mode 100644 index 00000000..19ff2288 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/networkpolicy.yaml @@ -0,0 +1,54 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: patroni + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if eq .Values.service.type "ClusterIP" }} + ingress: + # Allow inbound connections + - ports: + - port: 5432 + - port: 9000 + - port: 80 + - port: 8008 + {{- if and .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{ end }} + {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: backup + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + ingress: + - {} + {{- end }} +{{- end }} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/role.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/role.yaml new file mode 100644 index 00000000..8dec5309 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/role.yaml @@ -0,0 +1,49 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: + - create + - get + - list + - patch + - update + - watch + # delete is required only for 'patronictl remove' + - delete +- apiGroups: [""] + resources: ["services"] + verbs: + - create +- apiGroups: [""] + resources: ["endpoints"] + verbs: + - create + - get + - patch + - update + # the following three privileges are necessary only when using endpoints + - list + - watch + # delete is required only for for 'patronictl remove' + - delete + - deletecollection +- apiGroups: [""] + resources: ["pods"] + verbs: + - get + - list + - patch + - update + - watch +{{- end }} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/rolebinding.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/rolebinding.yaml new file mode 100644 index 00000000..5e15948f --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: {{ template "patroni.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "patroni.fullname" . 
}} +{{- end }} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/sec.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/sec.yaml new file mode 100644 index 00000000..c2e13055 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/sec.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} +type: Opaque +data: + password-superuser: {{ include "credentials.superuserValue" . | b64enc | quote }} + password-rewind: {{ include "credentials.rewindValue" . | b64enc | quote }} + password-replication: {{ include "credentials.replicationValue" . | b64enc | quote }} + admin-user: {{ .Values.adminRole.username | b64enc | quote }} + admin-password: {{ include "adminRole.passwordValue" . | b64enc | quote }} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/serviceaccount.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/serviceaccount.yaml new file mode 100644 index 00000000..e1b2ebf6 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "patroni.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end }} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml new file mode 100644 index 00000000..d826952c --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml @@ -0,0 +1,273 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} +spec: + serviceName: {{ template "patroni.fullname" . }} + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} + template: + metadata: + name: {{ template "patroni.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} + spec: + {{- if .Values.patroni.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.patroni.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.patroni.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.patroni.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.patroni.nodeAffinityPreset.type "key" .Values.patroni.nodeAffinityPreset.key "values" .Values.patroni.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.patroni.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.patroni.nodeSelector "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "patroni.serviceAccountName" . }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + env: + {{- include "patroni.envs" . 
| indent 8 }} + {{- if .Values.env }} + {{- range $key, $val := .Values.env }} + - name: {{ $key | quote | upper }} + value: {{ $val | quote }} + {{- end }} + {{- end }} + readinessProbe: + httpGet: + scheme: HTTP + path: /readiness + port: 8008 + initialDelaySeconds: 3 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /liveness + port: 8008 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /usr/bin/env + - bash + - -c + - | + # switch leader pod if the current pod is the leader + if curl --fail http://localhost:8008/read-write; then + init-stack patronictl switchover --force + fi + ports: + - containerPort: 8008 + protocol: TCP + - containerPort: 5432 + protocol: TCP + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + - mountPath: "/opt/drycc/postgresql/scripts/" + name: patroni-config + - mountPath: "/opt/drycc/postgresql/config/" + name: postgresql-config + # readOnly: true + - mountPath: "/opt/drycc/postgresql/backup/" + name: backup-config + - name: dshm + mountPath: /dev/shm + # readOnly: true + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: "{{ .Values.metrics.image.repository }}:{{ .Values.metrics.image.tag }}" + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if 
.Values.metrics.customMetrics }} + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + env: + - name: DATA_SOURCE_NAME + value: {{ printf "postgresql://tea_mon:password@127.0.0.1:5432/postgres?sslmode=disable" }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPort }} + startupProbe: + initialDelaySeconds: 10 + tcpSocket: + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + - name: {{ .Chart.Name }}-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + command: + - /usr/bin/env + - bash + - -c + - | + python3 /opt/drycc/postgresql/pgbackup.py 0.0.0.0 9000 + env: + - name: PGHOST + value: localhost + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser + - name: PGUSER + value: postgres + - name: PGDATABASE + value: postgres + - name: PGPORT + value: "5432" + - name: PGDATA + value: "{{ .Values.persistentVolume.mountPath }}/data" + ports: + - containerPort: 9000 + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + - mountPath: "/opt/drycc/postgresql/backup/" + name: backup-config + + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ .Values.affinity | toYaml | indent 8 }} + {{- else if .Values.affinityTemplate }} + affinity: +{{ tpl .Values.affinityTemplate . | indent 8 }} + {{- end }} + volumes: + - name: patroni-config + configMap: + name: {{ template "common.names.fullname" . }}-patroni + - name: postgresql-config + configMap: + name: {{ template "common.names.fullname" . }}-postgresql + - name: backup-config + configMap: + name: {{ template "common.names.fullname" . }}-backup + {{- if not .Values.persistentVolume.enabled }} + - name: storage-volume + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + + {{- if .Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + {{- if .Values.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + annotations: + {{- if .Values.persistentVolume.annotations }} +{{ toYaml .Values.persistentVolume.annotations | indent 8 }} + {{- end }} + labels: + application: {{ template "patroni.fullname" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: + accessModes: +{{ toYaml .Values.persistentVolume.accessModes | indent 8 }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-config.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-config.yaml new file mode 100644 index 00000000..5f7b0f60 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-config.yaml @@ -0,0 +1,11 @@ +# headless service to avoid deletion of patronidemo-config endpoint +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-config + labels: + application: {{ template "patroni.fullname" . }} + release: {{ .Release.Name }} + cluster-name: {{ template "patroni.fullname" . }} +spec: + clusterIP: None diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-master.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-master.yaml new file mode 100644 index 00000000..609ed5ba --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-master.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-master + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} + role: primary +spec: + type: {{ .Values.service.type }} + selector: + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . }} + role: primary + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + protocol: TCP \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-metrics.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-metrics.yaml new file mode 100644 index 00000000..862c6a0c --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-metrics.yaml @@ -0,0 +1,32 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "patroni.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: patroni +{{- end }} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-relp.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-relp.yaml new file mode 100644 index 00000000..252882b3 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc-relp.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-repl + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} + role: replica +spec: + type: {{ .Values.service.type }} + selector: + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . }} + role: replica + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + - name: pgbackup + port: 9000 + targetPort: 9000 \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc.yaml new file mode 100644 index 00000000..ac0c2c44 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} +spec: + type: ClusterIP + ports: + - port: 5432 + targetPort: 5432 + protocol: TCP \ No newline at end of file diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml new file mode 100644 index 00000000..0e1de2f5 --- /dev/null +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml @@ -0,0 +1,440 @@ +replicaCount: 3 +diagnosticMode: + enabled: false + +service: + type: ClusterIP + +image: + # Image was built from registry.drycc.cc/drycc-addons/patroni:3.2 + # https://github.com/zalando/spilo/tree/master/postgres-appliance + repository: registry.drycc.cc/drycc-addons/postgresql-patroni + tag: 17 + # IfNotPresent , Always + pullPolicy: "IfNotPresent" + +logicalbackupImages: + repository: registry.drycc.cc/drycc-addons/postgresql-logicalbackup + tag: 17 + # IfNotPresent , Always + pullPolicy: "IfNotPresent" + +# Credentials used by Patroni , passwd +# https://github.com/zalando/patroni/blob/master/docs/SETTINGS.rst#postgresql +# https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst +credentials: + superuser: "" + rewind: "" + replication: "" + +adminRole: + username: administrator + password: "" + +# Distribution Configuration stores +# Please note that only one of the following stores should be enabled. +kubernetes: + endpoints: + enable: true + configmaps: + enable: false + +# Extra custom environment variables.
+env: {} + +# +#custom patroni.yaml used by patroni boot +# configuration: {} +preInitScript: | + mkdir -p /home/postgres/pgdata/log + ln -sf /dev/stdout "/home/postgres/pgdata/log/postgresql.csv" + cat > /opt/drycc/postgresql/patroni.yml <<__EOF__ + log: + level: INFO + restapi: + listen: 0.0.0.0:8008 + connect_address: 0.0.0.0:8008 + bootstrap: + dcs: + ttl: 30 + loop_wait: 10 + retry_timeout: 10 + maximum_lag_on_failover: 1048576 + failsafe_mode: true + postgresql: + use_pg_rewind: true + use_slots: true + pg_hba: + - local all all peer + - host all tea_mon 127.0.0.1/32 trust + - host all all 0.0.0.0/0 scram-sha-256 + - host replication ${PATRONI_REPLICATION_USERNAME} 0.0.0.0/0 scram-sha-256 + - host replication postgres 0.0.0.0/0 scram-sha-256 + custom_conf: '/opt/drycc/postgresql/config/custom_conf.conf' + parameters: + max_connections: {{ .Values.patroni.pgParameters.max_connections }} + max_worker_processes: {{ .Values.patroni.pgParameters.max_worker_processes }} + max_parallel_workers: {{ .Values.patroni.pgParameters.max_parallel_workers }} + wal_level: logical + hot_standby: "on" + max_wal_senders: 10 + max_replication_slots: 10 + hot_standby_feedback: on + max_prepared_transactions: 0 + max_locks_per_transaction: 64 + wal_log_hints: "on" + wal_keep_size: "1 GB" + max_slot_wal_keep_size: {{ .Values.patroni.pgParameters.max_slot_wal_keep_size | quote }} + track_commit_timestamp: "off" + archive_mode: "on" + archive_timeout: 300s + archive_command: sh /opt/drycc/postgresql/walbackup.sh %p + # timescaledb.license: 'timescale' + shared_preload_libraries: 'auto_explain,pg_stat_statements,timescaledb' + log_destination: 'csvlog' + log_filename: postgresql.log + logging_collector: on + log_directory: /home/postgres/pgdata/log + log_min_messages: 'info' + log_min_duration_statement: 1000 + log_lock_waits: on + log_statement: 'ddl' + {{ if .Values.postgresql.timezone -}} timezone: {{ .Values.postgresql.timezone }} {{- end }} + initdb: + - auth-host: scram-sha-256 + 
- auth-local: trust + - encoding: UTF8 + - locale: en_US.UTF-8 + - data-checksums + post_bootstrap: sh /opt/drycc/postgresql/scripts/post_init.sh + restapi: + connect_address: '${PATRONI_KUBERNETES_POD_IP}:8008' + postgresql: + connect_address: '${PATRONI_KUBERNETES_POD_IP}:5432' + authentication: + superuser: + username: postgres + password: '${PATRONI_SUPERUSER_PASSWORD}' + replication: + username: standby + password: '${PATRONI_REPLICATION_PASSWORD}' + rewind: # Has no effect on postgres 10 and lower + username: rewinder + password: '${PATRONI_REWIND_PASSWORD}' + watchdog: + mode: off + __EOF__ + +postInitScript: | + #!/bin/bash + set -Eeu + # Create monitor user + psql -w -c "CREATE USER tea_mon ;GRANT pg_monitor TO tea_mon ;create extension pg_stat_statements;create extension pg_buffercache ;" + # Create admin user + if [[( -n "$ADMIN_USER") && ( -n "$ADMIN_PASSWORD")]]; then + + echo "Creating user ${ADMIN_USER}" + psql -w -c "CREATE USER ${ADMIN_USER} WITH SUPERUSER CREATEDB CREATEROLE CONNECTION LIMIT 10 LOGIN ENCRYPTED PASSWORD '${ADMIN_PASSWORD}'" + + else + echo "Skipping create admin user" + fi + psql -w -c "CHECKPOINT;CHECKPOINT;" + +backupEnv: | + #!/bin/bash + export USE_WALG={{ .Values.backup.enabled | quote }} + export BACKUP_NUM_TO_RETAIN={{ .Values.backup.retainBackups | quote}} + export WALG_BACKUP_THRESHOLD_MEGABYTES={{ .Values.backup.backupThresholdMegabytes | quote }} + export WALE_BACKUP_THRESHOLD_PERCENTAGE={{ .Values.backup.backupThresholdPercentage | quote }} + export AWS_ACCESS_KEY_ID={{ .Values.backup.s3.awsAccessKeyID | quote }} + export AWS_SECRET_ACCESS_KEY={{ .Values.backup.s3.awsSecretAccessKey | quote }} + export WALG_S3_PREFIX={{ .Values.backup.s3.walGS3Prefix | quote }} + export AWS_ENDPOINT={{ .Values.backup.s3.awsEndpoint | quote }} + export AWS_S3_FORCE_PATH_STYLE={{ .Values.backup.s3.awsS3ForcePathStyle | quote }} + export AWS_REGION={{ .Values.backup.s3.awsRegion | quote }} + +logicalbackupScript: | + #!/bin/bash + + # 
PostgreSQL settings + # POSTGRES_USER="postgres" + # POSTGRES_HOST="127.0.0.1" + + # MinIO settings + # MINIO_BUCKET="pgbackup" + # MINIO_HOST="http://localhost:9000" + # MINIO_ACCESS_KEY="admin123" + # MINIO_SECRET_KEY="admin123" + + # Configure the MinIO client alias + mc alias set myminio $MINIO_HOST $MINIO_ACCESS_KEY $MINIO_SECRET_KEY + + # Create a backup directory named after the current date and time + BACKUP_DIR="$(date +%Y%m%d%H%M)" + MINIO_PATH="myminio/$MINIO_BUCKET/$BACKUP_DIR" + + # Back up global objects + echo "Backing up global objects to $MINIO_PATH/roles_globals.sql.gz" + pg_dumpall -g -U "$POSTGRES_USER" -h "$POSTGRES_HOST" | pigz | mc pipe "$MINIO_PATH/roles_globals.sql.gz" + + # Get the list of all non-template databases + DATABASES=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") + + # Run a backup for each database + for DB in $DATABASES; do + echo "Backing up $DB to $MINIO_PATH/$DB.sql.gz" + pg_dump -U "$POSTGRES_USER" -h "$POSTGRES_HOST" "$DB" | pigz | mc pipe "$MINIO_PATH/$DB.sql.gz" + done + + echo "Backup process completed!" + +postgresql: + timezone: + config: |- + log_min_duration_statement = 1000 + max_wal_size = 4GB + min_wal_size = 4GB + max_wal_senders = 10 + max_replication_slots = 10 + max_prepared_transactions = 0 + max_locks_per_transaction = 64 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: "2 GB" + + ## @param patroni.podAnnotations Additional pod annotations for Postgresql patroni pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param patroni.podAffinityPreset Postgresql patroni pod affinity preset. Ignored if `patroni.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param patroni.podAntiAffinityPreset Postgresql patroni pod anti-affinity preset. Ignored if `patroni.affinity` is set.
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Postgresql Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param patroni.nodeAffinityPreset.type Postgresql patroni node affinity preset type. Ignored if `patroni.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param patroni.nodeAffinityPreset.key Postgresql patroni node label key to match Ignored if `patroni.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param patroni.nodeAffinityPreset.values Postgresql patroni node label values to match. Ignored if `patroni.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param patroni.affinity Affinity for Postgresql patroni pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param patroni.nodeSelector Node labels for Postgresql patroni pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + +## Postgresql Prometheus exporter parameters +## +metrics: + enabled: true + image: + repository: registry.drycc.cc/drycc-addons/postgres-exporter + tag: "0" + # IfNotPresent , Always + pullPolicy: "IfNotPresent" + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database:.... 
+ ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + service: + ports: + metrics: 9187 + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + + customMetrics: {} + containerPort: 9187 + containerSecurityContext: + enabled: false + runAsUser: 1001 + runAsNonRoot: true + customLivenessProbe: {} + customReadinessProbe: + enabled: true + resources: + limits: + cpu: 100m + memory: 512Mi + hugepages-2Mi: 20Mi + requests: + cpu: 100m + memory: 512Mi + +logicalbackup: + enabled: false + scheduleCronJob: "22 0 * * 0" + minio: + used: true + buckect: "s3://xx" + access_key: "" + secret_key: "" + endpoint: "http://xxxx:9000" + awsS3ForcePathStyle: "true" + awsRegion: dx-1 + +backup: + # Specifies whether Wal-G should be enabled + enabled: false + # Cron schedule for doing base backups + scheduleCronJob: "22 0 * * 0" + # Amount of base backups to retain + retainBackups: 2 + # Name of the secret that holds the credentials to the bucket + kubernetesSecret: + # Maximum size of the WAL segments accumulated after the base backup to + # consider WAL-G restore instead of pg_basebackup + backupThresholdMegabytes: 1024 + # Maximum ratio (in percents) of the accumulated WAL files to 
the base backup + # to consider WAL-G restore instead of pg_basebackup + backupThresholdPercentage: 30 + s3: + used: true + awsAccessKeyID: "" + awsSecretAccessKey: "" + walGS3Prefix: "s3://xx" + awsEndpoint: "http://xxxx:9000" + awsS3ForcePathStyle: "true" + awsRegion: dx-1 + +logicalBackup: + enabled: false + +## persistentVolumeClaimRetentionPolicy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention +## @param persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet +## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced +## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted +persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete +persistentVolume: + enabled: true + size: 10G + ## database data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + subPath: "" + mountPath: "/home/postgres/pgdata" + annotations: {} + accessModes: + - ReadWriteOnce + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 100m + memory: 512Mi + # hugepages-2Mi: 4Mi + requests: + cpu: 100m + memory: 512Mi + +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "1Gi" + +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinityTemplate: | + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + application: {{ template "patroni.name" . }} + release: {{ .Release.Name | quote }} +affinity: {} +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: +## Postgresql Nework Policy configuration +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param networkPolicy.allowExternal The Policy model to apply. + ## When set to false, only pods with the correct + ## client label will have network access to the port Postgresql is listening + ## on. When true, Postgresql will accept connections from any source + ## (with the correct destination port). 
+ ## + allowCurrentNamespace: true + allowNamespaces: +clusterDomain: cluster.local diff --git a/addons/postgresql-cluster/17/meta.yaml b/addons/postgresql-cluster/17/meta.yaml new file mode 100644 index 00000000..d6e0090d --- /dev/null +++ b/addons/postgresql-cluster/17/meta.yaml @@ -0,0 +1,30 @@ +name: postgresql-cluster-17 +version: 17 +id: 1eefe6a4-9430-11f0-832f-fbf5fdedcb62 +description: "postgresql-cluster-17" +displayName: "postgresql-cluster-17" +metadata: + displayName: "postgresql-cluster-17" + provider: + name: drycc + supportURL: https://www.postgresql.org/ + documentationURL: https://github.com/drycc-addons/drycc-docker-postgresql-cluster +tags: postgresql-cluster +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" +- name: "service.type" + required: false + description: "service type config for values.yaml" +- name: "backup" + required: false + description: "Whether to use S3 for backup your data. default false . ps: Make sure there is a available S3 " +- name: "logicalbackup" + required: false + description: "Whether to use S3 for logical backup your data. default false . ps: Make sure there is a available S3 " +archive: false \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml b/addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-16c64g400/instance-schema.json b/addons/postgresql-cluster/17/plans/standard-16c64g400/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-16c64g400/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-16c64g400/meta.yaml b/addons/postgresql-cluster/17/plans/standard-16c64g400/meta.yaml new file mode 100644 index 00000000..3070475f --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-16c64g400/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c64g400" +id: 9c9d326e-9430-11f0-93d1-573b41ecaad5 +description: "PostgreSQL Cluster standard-16c64g400 plan: Disk 400Gi ,vCPUs 16 , RAM 64G , DB MAX Connection 2000" +displayName: "standard-16c64g400" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml new file mode 100644 index 00000000..f4cd8ae8 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml @@ -0,0 +1,81 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-400 +patroni: + pgParameters: + max_worker_processes: 32 + max_parallel_workers: 16 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers =
'16384 MB' + work_mem = '32 MB' + maintenance_work_mem = '520 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '45 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '16 GB' + min_wal_size = '2 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 8 + max_parallel_maintenance_workers = 8 + max_parallel_workers = 16 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 40GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 16000m + memory: 64Gi + hugepages-2Mi: 40Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 400Gi + +shmVolume: + sizeLimit: "32Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml b/addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DADABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c4g20/instance-schema.json b/addons/postgresql-cluster/17/plans/standard-2c4g20/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c4g20/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c4g20/meta.yaml b/addons/postgresql-cluster/17/plans/standard-2c4g20/meta.yaml new file mode 100644 index 00000000..04ae244b --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c4g20/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g20" +id: 68589728-9430-11f0-82a4-6b51ec33c9e0 +description: "PostgreSQL Cluster standard-2c4g20 plan: Disk 20Gi ,vCPUs 2 , RAM 4G , DB MAX Connection 1000" +displayName: "standard-2c4g20" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml new file mode 100644 index 00000000..bedec78d --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml @@ -0,0 +1,81 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-20 + +patroni: + pgParameters: + max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 1000 + max_slot_wal_keep_size: '2 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '1024 MB' + work_mem = '32 MB' + 
maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '3 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '15 min' + checkpoint_completion_target = 0.9 + max_wal_size = '2 GB' + min_wal_size = '1 GB' + + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 1 + max_parallel_maintenance_workers = 1 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 2000m + memory: 4Gi + hugepages-2Mi: 20Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 20Gi + +shmVolume: + sizeLimit: "2Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml b/addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DADABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c8g50/instance-schema.json b/addons/postgresql-cluster/17/plans/standard-2c8g50/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c8g50/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c8g50/meta.yaml b/addons/postgresql-cluster/17/plans/standard-2c8g50/meta.yaml new file mode 100644 index 00000000..7eacdb2b --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c8g50/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c8g50" +id: 7a51990c-9430-11f0-9079-7f8ed47bf9e6 +description: "PostgreSQL Cluster standard-2c8g50 plan: Disk 50Gi ,vCPUs 2 , RAM 8G , DB MAX Connection 2000" +displayName: "standard-2c8g50" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml new file mode 100644 index 00000000..564731a7 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml @@ -0,0 +1,83 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-50 + +patroni: + pgParameters: + max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 2000 + max_slot_wal_keep_size: '5 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '2048 MB' + work_mem = '32 MB' + 
maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '6 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '15 min' + checkpoint_completion_target = 0.9 + max_wal_size = '4 GB' + min_wal_size = '1 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 1 + max_parallel_maintenance_workers = 1 + max_parallel_workers = 2 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 5GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 2000m + memory: 8Gi + hugepages-2Mi: 20Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 50Gi + +shmVolume: + sizeLimit: "4Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml b/addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DADABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c128g800/instance-schema.json b/addons/postgresql-cluster/17/plans/standard-32c128g800/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c128g800/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c128g800/meta.yaml b/addons/postgresql-cluster/17/plans/standard-32c128g800/meta.yaml new file mode 100644 index 00000000..9b5010a7 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c128g800/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c128g800" +id: ba142f96-9430-11f0-9e33-cf8191859471 +description: "PostgreSQL Cluster standard-32c128g800 plan: Disk 800Gi ,vCPUs 32 , RAM 128G , DB MAX Connection 2000" +displayName: "standard-32c128g800" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml new file mode 100644 index 00000000..3af68467 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-800 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '100 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + 
shared_buffers = '32768 MB' + work_mem = '64 MB' + maintenance_work_mem = '720 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '90 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '64 GB' + min_wal_size = '4 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 16 + max_parallel_maintenance_workers = 16 + max_parallel_workers = 32 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 80GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 32000m + memory: 128Gi + hugepages-2Mi: 80Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 800Gi + +shmVolume: + sizeLimit: "64Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml b/addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DADABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c64g4000/instance-schema.json b/addons/postgresql-cluster/17/plans/standard-32c64g4000/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c64g4000/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c64g4000/meta.yaml b/addons/postgresql-cluster/17/plans/standard-32c64g4000/meta.yaml new file mode 100644 index 00000000..660e39aa --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c64g4000/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c64g4000" +id: ab26077a-9430-11f0-85c8-9328c7044661 +description: "PostgreSQL Cluster standard-32c64g4000 plan: Disk 4Ti ,vCPUs 32 , RAM 64G , DB MAX Connection 2000" +displayName: "standard-32c64g4000" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml new file mode 100644 index 00000000..da70e1c9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-800 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: '200 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + 
shared_buffers = '16384 MB' + work_mem = '256 MB' + maintenance_work_mem = '2048 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '45 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '32 GB' + min_wal_size = '4 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 16 + max_parallel_maintenance_workers = 16 + max_parallel_workers = 32 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 100GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 32000m + memory: 64Gi + hugepages-2Mi: 80Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 4Ti + +shmVolume: + sizeLimit: "32Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml b/addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DADABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-4c16g100/instance-schema.json b/addons/postgresql-cluster/17/plans/standard-4c16g100/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-4c16g100/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-4c16g100/meta.yaml b/addons/postgresql-cluster/17/plans/standard-4c16g100/meta.yaml new file mode 100644 index 00000000..72bf59c8 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-4c16g100/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c16g100" +id: 87033bba-9430-11f0-8666-f7ef09538278 +description: "PostgreSQL Cluster standard-4c16g100 plan: Disk 100Gi ,vCPUs 4 , RAM 16G , DB MAX Connection 2000" +displayName: "standard-4c16g100" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml new file mode 100644 index 00000000..6c91f192 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml @@ -0,0 +1,83 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-100 + +patroni: + pgParameters: + max_worker_processes: 8 + max_parallel_workers: 4 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '4096 MB' + 
work_mem = '32 MB' + maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '11 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '25 min' + checkpoint_completion_target = 0.9 + max_wal_size = '8 GB' + min_wal_size = '2 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 2 + max_parallel_maintenance_workers = 2 + max_parallel_workers = 4 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 10GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 4000m + memory: 16Gi + hugepages-2Mi: 50Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 100Gi + +shmVolume: + sizeLimit: "8Gi" \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml b/addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml new file mode 100644 index 00000000..5fc257f9 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DADABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-8c32g200/instance-schema.json b/addons/postgresql-cluster/17/plans/standard-8c32g200/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-8c32g200/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-8c32g200/meta.yaml b/addons/postgresql-cluster/17/plans/standard-8c32g200/meta.yaml new file mode 100644 index 00000000..d9b55c16 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-8c32g200/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g200" +id: 93b7b520-9430-11f0-b8c9-8f3b00d5dae8 +description: "PostgreSQL Cluster standard-8c32g200 plan: Disk 200Gi ,vCPUs 8 , RAM 32G , DB MAX Connection 2000" +displayName: "standard-8c32g200" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml new file mode 100644 index 00000000..dee10714 --- /dev/null +++ b/addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-200 + +patroni: + pgParameters: + max_worker_processes: 16 + max_parallel_workers: 8 + max_connections: 2000 + max_slot_wal_keep_size: '10 GB' + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '8192 MB' + 
work_mem = '32 MB' + maintenance_work_mem = '420 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '22 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '25 min' + checkpoint_completion_target = 0.9 + max_wal_size = '16 GB' + min_wal_size = '3 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 4 + max_parallel_maintenance_workers = 4 + max_parallel_workers = 8 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = on + max_slot_wal_keep_size = 20GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 8000m + memory: 32Gi + hugepages-2Mi: 60Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 200Gi + +shmVolume: + sizeLimit: "16Gi" \ No newline at end of file From aec7efd42b9df95ab31d0fb63e5e3bed84eca402 Mon Sep 17 00:00:00 2001 From: Eamon Date: Sun, 28 Sep 2025 10:30:02 +0800 Subject: [PATCH 77/93] chore(mysql-cluster): optimise mySQL router configuration --- .../templates/router/statefulset.yaml | 2 +- .../8.0/chart/mysql-cluster/values.yaml | 61 +++++++++---------- 2 files changed, 30 insertions(+), 33 deletions(-) diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/router/statefulset.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/router/statefulset.yaml index 9ef9e93e..bb90e249 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/router/statefulset.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/templates/router/statefulset.yaml @@ -127,7 +127,7 @@ spec: - | export MYSQL_MASTER_HOST=${MYSQL_HOST}-0.${MYSQL_HOST} - echo ${MYSQL_ROOT_PASSWORD} | mysqlrouter --bootstrap root@${MYSQL_MASTER_HOST}:$(MYSQL_PORT_NUMBER) -d /opt/drycc/mysql/conf/router --name mxrouter --force + echo ${MYSQL_ROOT_PASSWORD} | mysqlrouter --bootstrap root@${MYSQL_MASTER_HOST}:$(MYSQL_PORT_NUMBER) -d /opt/drycc/mysql/conf/router --name mxrouter --conf-set-option=metadata_cache:bootstrap.ttl=5 --conf-set-option=metadata_cache:bootstrap.auth_cache_refresh_interval=300 --force env: - name: MYSQL_HOST value: {{ include "mysql.primary.fullname" . }} diff --git a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml index 16bd4e37..0271f35a 100644 --- a/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml +++ b/addons/mysql-cluster/8.0/chart/mysql-cluster/values.yaml @@ -149,9 +149,9 @@ auth: ## initdbScripts: ## my_init_script.sh: | ## #!/bin/bash -## echo "Do something." +## echo "Do something." 
## -initdbScripts: +initdbScripts: int-mysql.sh: | #!/bin/bash set -ex @@ -211,14 +211,14 @@ primary: ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file ## maxConnectionLimit: 2000 - ## against plans + ## against plans extraConfig: | [mysqld] innodb_io_capacity=2000 innodb_io_capacity_max=3000 max_connect_errors=1000000 thread_cache_size=200 - + configuration: |- [mysqld] # server @@ -250,7 +250,7 @@ primary: default-time-zone='+8:00' local_infile=ON # Replication - + log_bin=mysql-bin gtid_mode=ON enforce_gtid_consistency=ON @@ -266,18 +266,18 @@ primary: replica_parallel_workers=8 # Group Replication Settings - + [client] port=3306 socket=/opt/drycc/mysql/tmp/mysql.sock default-character-set=UTF8MB4 plugin_dir=/opt/drycc/mysql/lib/plugin - + [manager] port=3306 socket=/opt/drycc/mysql/tmp/mysql.sock pid-file=/opt/drycc/mysql/tmp/mysqld.pid - + !includedir /drycc/mysql/ ## @param primary.existingConfigmap Name of existing ConfigMap with MySQL Primary configuration. 
@@ -593,20 +593,19 @@ primary: podLabels: {} cluster: - ## @param router.name Name of the cluster + ## @param router.name Name of the cluster ## name: cluster image: registry: registry.drycc.cc - repository: drycc-addons/mysql-shell + repository: drycc-addons/mysql-shell tag: "8.0" digest: "" - pullPolicy: IfNotPresent - + pullPolicy: IfNotPresent router: - ## @param router.name Name of the router + ## @param router.name Name of the router ## name: router ## @param router.replicaCount Number of MySQL secondary replicas @@ -725,7 +724,7 @@ router: ## cpu: 250m ## memory: 256Mi ## - limits: + limits: cpu: 250m memory: 256Mi ## Examples: @@ -733,7 +732,7 @@ router: ## cpu: 250m ## memory: 256Mi ## - requests: + requests: cpu: 250m memory: 256Mi ## Configure extra options for liveness probe @@ -749,8 +748,8 @@ router: enabled: true initialDelaySeconds: 5 periodSeconds: 10 - timeoutSeconds: 10 - failureThreshold: 3 + timeoutSeconds: 30 + failureThreshold: 10 successThreshold: 1 ## Configure extra options for readiness probe ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes @@ -987,7 +986,7 @@ rbac: networkPolicy: ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources ## - enabled: true + enabled: true ## @param networkPolicy.allowExternal The Policy model to apply. ## When set to false, only pods with the correct ## client label will have network access to the port MySQL is listening @@ -996,7 +995,6 @@ networkPolicy: ## allowCurrentNamespace: true allowNamespaces: [] - ## @section Volume Permissions parameters @@ -1019,7 +1017,7 @@ volumePermissions: repository: drycc/base tag: bookworm digest: "" - pullPolicy: IfNotPresent + pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ @@ -1039,7 +1037,7 @@ volumePermissions: metrics: ## @param metrics.enabled Start a side-car prometheus exporter ## - enabled: true + enabled: true ## @param metrics.image.registry Exporter image registry ## @param metrics.image.repository Exporter image repository ## @param metrics.image.tag Exporter image tag (immutable tags are recommended) @@ -1114,10 +1112,9 @@ metrics: ## - --collect.heartbeat.table ## - extraArgs: - primary: - - --collect.info_schema.processlist + primary: + - --collect.info_schema.processlist - --collect.perf_schema.replication_group_members - --collect.perf_schema.replication_group_member_stats - --collect.perf_schema.replication_applier_status_by_worker @@ -1137,14 +1134,14 @@ metrics: ## limits: ## cpu: 100m ## memory: 256Mi - limits: + limits: cpu: 100m memory: 256Mi ## Examples: ## requests: ## cpu: 100m ## memory: 256Mi - requests: + requests: cpu: 100m memory: 256Mi containerSecurityContext: @@ -1261,13 +1258,13 @@ metrics: ## summary: MariaDB instance is down ## rules: [] - + backup: - enabled: false + enabled: false schedule: "" - s3: + s3: endpoint: "" bucketName: "" - accessKey: "" - secretKey: "" - region: us-west-1 \ No newline at end of file + accessKey: "" + secretKey: "" + region: us-west-1 From fcd413a69e05955493100e1d8375de48319f70c3 Mon Sep 17 00:00:00 2001 From: Eamon Date: Fri, 21 Nov 2025 15:02:58 +0800 Subject: [PATCH 78/93] chore(postgresql): adjust params (#123) --- .../templates/statefulset.yaml | 2 +- .../15/chart/postgresql-cluster/values.yaml | 41 +++++++++---------- .../15/plans/standard-16c64g400/values.yaml | 8 ++-- .../15/plans/standard-2c4g20/values.yaml | 8 ++-- .../15/plans/standard-2c8g50/values.yaml | 8 ++-- .../15/plans/standard-32c128g800/values.yaml | 8 ++-- .../15/plans/standard-32c64g4000/values.yaml | 8 ++-- .../15/plans/standard-4c16g100/values.yaml | 10 ++--- .../15/plans/standard-8c32g200/values.yaml | 
8 ++-- .../templates/statefulset.yaml | 2 +- .../chart/postgresql-cluster-16/values.yaml | 41 +++++++++---------- .../16/plans/standard-16c64g400/values.yaml | 8 ++-- .../16/plans/standard-2c4g20/values.yaml | 8 ++-- .../16/plans/standard-2c8g50/values.yaml | 8 ++-- .../16/plans/standard-32c128g800/values.yaml | 8 ++-- .../16/plans/standard-32c64g4000/values.yaml | 8 ++-- .../16/plans/standard-4c16g100/values.yaml | 8 ++-- .../16/plans/standard-8c32g200/values.yaml | 8 ++-- .../templates/statefulset.yaml | 2 +- .../chart/postgresql-cluster-17/values.yaml | 4 +- .../17/plans/standard-16c64g400/values.yaml | 10 ++--- .../17/plans/standard-2c4g20/values.yaml | 10 ++--- .../17/plans/standard-2c8g50/values.yaml | 10 ++--- .../17/plans/standard-32c128g800/values.yaml | 8 ++-- .../17/plans/standard-32c64g4000/values.yaml | 8 ++-- .../17/plans/standard-4c16g100/values.yaml | 10 ++--- .../17/plans/standard-8c32g200/values.yaml | 10 ++--- 27 files changed, 133 insertions(+), 139 deletions(-) diff --git a/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/statefulset.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/statefulset.yaml index d826952c..3f1efb1b 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/statefulset.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/templates/statefulset.yaml @@ -251,7 +251,7 @@ spec: name: storage-volume annotations: {{- if .Values.persistentVolume.annotations }} -{{ toYaml .Values.persistentVolume.annotations | indent 8 }} +{{ toYaml .Values.persistentVolume.annotations | indent 10 }} {{- end }} labels: application: {{ template "patroni.fullname" . 
}} diff --git a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml index 7d103fd1..8ed9f165 100644 --- a/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml +++ b/addons/postgresql-cluster/15/chart/postgresql-cluster/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 -diagnosticMode: +diagnosticMode: enable: false service: - type: ClusterIP + type: ClusterIP image: # Image was built from registry.drycc.cc/drycc-addons/patroni:3.2 @@ -11,13 +11,13 @@ image: repository: registry.drycc.cc/drycc-addons/postgresql-patroni tag: 15 # IfNotPresent , Always - pullPolicy: 'IfNotPresent' + pullPolicy: "IfNotPresent" -logicalbackupImages: +logicalbackupImages: repository: registry.drycc.cc/drycc-addons/postgresql-logicalbackup tag: 15 # IfNotPresent , Always - pullPolicy: 'IfNotPresent' + pullPolicy: "IfNotPresent" # Credentials used by Patroni , passwd # https://github.com/zalando/patroni/blob/master/docs/SETTINGS.rst#postgresql @@ -188,8 +188,7 @@ logicalbackupScript: | echo "Backup process completed!" 
- -postgresql: +postgresql: config: |- log_min_duration_statement = 1000 max_wal_size = 4GB @@ -204,8 +203,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '2 GB' - + max_slot_wal_keep_size: "2 GB" ## @param patroni.podAnnotations Additional pod annotations for Postgresql patroni pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ @@ -251,12 +249,12 @@ patroni: ## Postgresql Prometheus exporter parameters ## metrics: - enabled: true + enabled: true image: repository: registry.drycc.cc/drycc-addons/postgres-exporter tag: "0" # IfNotPresent , Always - pullPolicy: 'IfNotPresent' + pullPolicy: "IfNotPresent" ## @param metrics.customMetrics Define additional custom metrics ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file ## customMetrics: @@ -278,14 +276,14 @@ metrics: ## Values: ClientIP or None ## ref: https://kubernetes.io/docs/user-guide/services/ ## - sessionAffinity: None + sessionAffinity: None ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint ## annotations: prometheus.io/scrape: "true" prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" - customMetrics: {} + customMetrics: {} containerPort: 9187 containerSecurityContext: enabled: false @@ -293,8 +291,8 @@ metrics: runAsNonRoot: true customLivenessProbe: {} customReadinessProbe: - enabled: true - resources: + enabled: true + resources: limits: cpu: 100m memory: 512Mi @@ -303,7 +301,7 @@ metrics: cpu: 100m memory: 512Mi -logicalbackup: +logicalbackup: enabled: false scheduleCronJob: "22 0 * * 0" minio: @@ -368,13 +366,13 @@ persistentVolume: accessModes: - ReadWriteOnce -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: cpu: 100m memory: 512Mi - # hugepages-2Mi: 4Mi + hugepages-2Mi: 4Mi requests: cpu: 100m memory: 512Mi @@ -408,7 +406,6 @@ affinityTemplate: | application: {{ template "patroni.name" . }} release: {{ .Release.Name | quote }} affinity: {} - ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## @@ -429,7 +426,7 @@ serviceAccount: networkPolicy: ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources ## - enabled: true + enabled: true ## @param networkPolicy.allowExternal The Policy model to apply. ## When set to false, only pods with the correct ## client label will have network access to the port Postgresql is listening @@ -437,5 +434,5 @@ networkPolicy: ## (with the correct destination port). ## allowCurrentNamespace: true - allowNamespaces: -clusterDomain: cluster.local \ No newline at end of file + allowNamespaces: +clusterDomain: cluster.local diff --git a/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml index f4cd8ae8..047deadd 100644 --- a/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-16c64g400/values.yaml @@ -6,7 +6,7 @@ patroni: max_worker_processes: 32 max_parallel_workers: 16 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -57,12 +57,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 40GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -78,4 +78,4 @@ persistentVolume: size: 400Gi shmVolume: - sizeLimit: "32Gi" \ No newline at end of file + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml index bedec78d..8795c9b4 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c4g20/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 4 max_parallel_workers: 2 max_connections: 1000 - max_slot_wal_keep_size: '2 GB' + max_slot_wal_keep_size: "2 GB" postgresql: config: |- @@ -58,11 +58,11 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: @@ -78,4 +78,4 @@ persistentVolume: size: 20Gi shmVolume: - sizeLimit: "2Gi" \ No newline at end of file + sizeLimit: "2Gi" diff --git a/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml index 564731a7..4263ffa2 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c8g50/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 4 max_parallel_workers: 2 max_connections: 2000 - max_slot_wal_keep_size: '5 GB' + max_slot_wal_keep_size: "5 GB" postgresql: config: |- @@ -59,12 +59,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 5GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly 
braces after 'resources:'. limits: @@ -80,4 +80,4 @@ persistentVolume: size: 50Gi shmVolume: - sizeLimit: "4Gi" \ No newline at end of file + sizeLimit: "4Gi" diff --git a/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml index 3af68467..c2ca0477 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c128g800/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '100 GB' + max_slot_wal_keep_size: "100 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 80GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -79,4 +79,4 @@ persistentVolume: size: 800Gi shmVolume: - sizeLimit: "64Gi" \ No newline at end of file + sizeLimit: "64Gi" diff --git a/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml index da70e1c9..56fc31f5 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c64g4000/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '200 GB' + max_slot_wal_keep_size: "200 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 100GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -79,4 +79,4 @@ persistentVolume: size: 4Ti shmVolume: - sizeLimit: "32Gi" \ No newline at end of file + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml index 6c91f192..effda2a8 100644 --- a/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-4c16g100/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 8 max_parallel_workers: 4 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -59,12 +59,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on - max_slot_wal_keep_size = 10GB + jit = off + max_slot_wal_keep_size = 10GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -80,4 +80,4 @@ persistentVolume: size: 100Gi shmVolume: - sizeLimit: "8Gi" \ No newline at end of file + sizeLimit: "8Gi" diff --git a/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml index dee10714..03671b6c 100644 --- a/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml +++ b/addons/postgresql-cluster/15/plans/standard-8c32g200/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 16 max_parallel_workers: 8 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 20GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: @@ -79,4 +79,4 @@ persistentVolume: size: 200Gi shmVolume: - sizeLimit: "16Gi" \ No newline at end of file + sizeLimit: "16Gi" diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml index d826952c..3f1efb1b 100644 --- a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/templates/statefulset.yaml @@ -251,7 +251,7 @@ spec: name: storage-volume annotations: {{- if .Values.persistentVolume.annotations }} -{{ toYaml .Values.persistentVolume.annotations | indent 8 }} +{{ toYaml .Values.persistentVolume.annotations | indent 10 }} {{- end }} labels: application: {{ template "patroni.fullname" . 
}} diff --git a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml index fcf73961..00947615 100644 --- a/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml +++ b/addons/postgresql-cluster/16/chart/postgresql-cluster-16/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 -diagnosticMode: +diagnosticMode: enable: false service: - type: ClusterIP + type: ClusterIP image: # Image was built from registry.drycc.cc/drycc-addons/patroni:3.2 @@ -11,13 +11,13 @@ image: repository: registry.drycc.cc/drycc-addons/postgresql-patroni tag: 16 # IfNotPresent , Always - pullPolicy: 'IfNotPresent' + pullPolicy: "IfNotPresent" -logicalbackupImages: +logicalbackupImages: repository: registry.drycc.cc/drycc-addons/postgresql-logicalbackup tag: 16 # IfNotPresent , Always - pullPolicy: 'IfNotPresent' + pullPolicy: "IfNotPresent" # Credentials used by Patroni , passwd # https://github.com/zalando/patroni/blob/master/docs/SETTINGS.rst#postgresql @@ -188,8 +188,7 @@ logicalbackupScript: | echo "Backup process completed!" 
- -postgresql: +postgresql: config: |- log_min_duration_statement = 1000 max_wal_size = 4GB @@ -204,8 +203,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '2 GB' - + max_slot_wal_keep_size: "2 GB" ## @param patroni.podAnnotations Additional pod annotations for Postgresql patroni pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ @@ -251,12 +249,12 @@ patroni: ## Postgresql Prometheus exporter parameters ## metrics: - enabled: true + enabled: true image: repository: registry.drycc.cc/drycc-addons/postgres-exporter tag: "0" # IfNotPresent , Always - pullPolicy: 'IfNotPresent' + pullPolicy: "IfNotPresent" ## @param metrics.customMetrics Define additional custom metrics ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file ## customMetrics: @@ -278,14 +276,14 @@ metrics: ## Values: ClientIP or None ## ref: https://kubernetes.io/docs/user-guide/services/ ## - sessionAffinity: None + sessionAffinity: None ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint ## annotations: prometheus.io/scrape: "true" prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" - customMetrics: {} + customMetrics: {} containerPort: 9187 containerSecurityContext: enabled: false @@ -293,8 +291,8 @@ metrics: runAsNonRoot: true customLivenessProbe: {} customReadinessProbe: - enabled: true - resources: + enabled: true + resources: limits: cpu: 100m memory: 512Mi @@ -303,7 +301,7 @@ metrics: cpu: 100m memory: 512Mi -logicalbackup: +logicalbackup: enabled: false scheduleCronJob: "22 0 * * 0" minio: @@ -368,13 +366,13 @@ persistentVolume: accessModes: - ReadWriteOnce -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: cpu: 100m memory: 512Mi - # hugepages-2Mi: 4Mi + hugepages-2Mi: 4Mi requests: cpu: 100m memory: 512Mi @@ -408,7 +406,6 @@ affinityTemplate: | application: {{ template "patroni.name" . }} release: {{ .Release.Name | quote }} affinity: {} - ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## @@ -429,7 +426,7 @@ serviceAccount: networkPolicy: ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources ## - enabled: true + enabled: true ## @param networkPolicy.allowExternal The Policy model to apply. ## When set to false, only pods with the correct ## client label will have network access to the port Postgresql is listening @@ -437,5 +434,5 @@ networkPolicy: ## (with the correct destination port). ## allowCurrentNamespace: true - allowNamespaces: -clusterDomain: cluster.local \ No newline at end of file + allowNamespaces: +clusterDomain: cluster.local diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml index f4cd8ae8..047deadd 100644 --- a/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-16c64g400/values.yaml @@ -6,7 +6,7 @@ patroni: max_worker_processes: 32 max_parallel_workers: 16 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -57,12 +57,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 40GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -78,4 +78,4 @@ persistentVolume: size: 400Gi shmVolume: - sizeLimit: "32Gi" \ No newline at end of file + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml index bedec78d..8795c9b4 100644 --- a/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-2c4g20/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 4 max_parallel_workers: 2 max_connections: 1000 - max_slot_wal_keep_size: '2 GB' + max_slot_wal_keep_size: "2 GB" postgresql: config: |- @@ -58,11 +58,11 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: @@ -78,4 +78,4 @@ persistentVolume: size: 20Gi shmVolume: - sizeLimit: "2Gi" \ No newline at end of file + sizeLimit: "2Gi" diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml index 564731a7..4263ffa2 100644 --- a/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-2c8g50/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 4 max_parallel_workers: 2 max_connections: 2000 - max_slot_wal_keep_size: '5 GB' + max_slot_wal_keep_size: "5 GB" postgresql: config: |- @@ -59,12 +59,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 5GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly 
braces after 'resources:'. limits: @@ -80,4 +80,4 @@ persistentVolume: size: 50Gi shmVolume: - sizeLimit: "4Gi" \ No newline at end of file + sizeLimit: "4Gi" diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml index 3af68467..c2ca0477 100644 --- a/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-32c128g800/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '100 GB' + max_slot_wal_keep_size: "100 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 80GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -79,4 +79,4 @@ persistentVolume: size: 800Gi shmVolume: - sizeLimit: "64Gi" \ No newline at end of file + sizeLimit: "64Gi" diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml index da70e1c9..56fc31f5 100644 --- a/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-32c64g4000/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '200 GB' + max_slot_wal_keep_size: "200 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 100GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -79,4 +79,4 @@ persistentVolume: size: 4Ti shmVolume: - sizeLimit: "32Gi" \ No newline at end of file + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml index 6c91f192..ea889707 100644 --- a/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-4c16g100/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 8 max_parallel_workers: 4 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -59,12 +59,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 10GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -80,4 +80,4 @@ persistentVolume: size: 100Gi shmVolume: - sizeLimit: "8Gi" \ No newline at end of file + sizeLimit: "8Gi" diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml index dee10714..03671b6c 100644 --- a/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml +++ b/addons/postgresql-cluster/16/plans/standard-8c32g200/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 16 max_parallel_workers: 8 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 20GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: @@ -79,4 +79,4 @@ persistentVolume: size: 200Gi shmVolume: - sizeLimit: "16Gi" \ No newline at end of file + sizeLimit: "16Gi" diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml index d826952c..3f1efb1b 100644 --- a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/templates/statefulset.yaml @@ -251,7 +251,7 @@ spec: name: storage-volume annotations: {{- if .Values.persistentVolume.annotations }} -{{ toYaml .Values.persistentVolume.annotations | indent 8 }} +{{ toYaml .Values.persistentVolume.annotations | indent 10 }} {{- end }} labels: application: {{ template "patroni.fullname" . 
}} diff --git a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml index 0e1de2f5..fcb3407e 100644 --- a/addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml +++ b/addons/postgresql-cluster/17/chart/postgresql-cluster-17/values.yaml @@ -297,8 +297,8 @@ metrics: resources: limits: cpu: 100m - memory: 512Mi hugepages-2Mi: 20Mi + memory: 512Mi requests: cpu: 100m memory: 512Mi @@ -373,8 +373,8 @@ resources: # lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: cpu: 100m + hugepages-2Mi: 4Mi memory: 512Mi - # hugepages-2Mi: 4Mi requests: cpu: 100m memory: 512Mi diff --git a/addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml index f4cd8ae8..72b92be7 100644 --- a/addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml +++ b/addons/postgresql-cluster/17/plans/standard-16c64g400/values.yaml @@ -6,7 +6,7 @@ patroni: max_worker_processes: 32 max_parallel_workers: 16 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -57,18 +57,18 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 40GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: cpu: 16000m - memory: 64Gi hugepages-2Mi: 40Mi + memory: 64Gi requests: cpu: 100m memory: 1Gi @@ -78,4 +78,4 @@ persistentVolume: size: 400Gi shmVolume: - sizeLimit: "32Gi" \ No newline at end of file + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml index bedec78d..098a0a08 100644 --- a/addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml +++ b/addons/postgresql-cluster/17/plans/standard-2c4g20/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 4 max_parallel_workers: 2 max_connections: 1000 - max_slot_wal_keep_size: '2 GB' + max_slot_wal_keep_size: "2 GB" postgresql: config: |- @@ -58,17 +58,17 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: cpu: 2000m - memory: 4Gi hugepages-2Mi: 20Mi + memory: 4Gi requests: cpu: 100m memory: 1Gi @@ -78,4 +78,4 @@ persistentVolume: size: 20Gi shmVolume: - sizeLimit: "2Gi" \ No newline at end of file + sizeLimit: "2Gi" diff --git a/addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml index 564731a7..affca346 100644 --- a/addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml +++ b/addons/postgresql-cluster/17/plans/standard-2c8g50/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 4 max_parallel_workers: 2 max_connections: 2000 - max_slot_wal_keep_size: '5 GB' + max_slot_wal_keep_size: "5 GB" postgresql: config: |- @@ -59,18 +59,18 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 5GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: cpu: 2000m - memory: 8Gi hugepages-2Mi: 20Mi + memory: 8Gi requests: cpu: 100m memory: 1Gi @@ -80,4 +80,4 @@ persistentVolume: size: 50Gi shmVolume: - sizeLimit: "4Gi" \ No newline at end of file + sizeLimit: "4Gi" diff --git a/addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml index 3af68467..c2ca0477 100644 --- a/addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml +++ b/addons/postgresql-cluster/17/plans/standard-32c128g800/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '100 GB' + max_slot_wal_keep_size: "100 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 80GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -79,4 +79,4 @@ persistentVolume: size: 800Gi shmVolume: - sizeLimit: "64Gi" \ No newline at end of file + sizeLimit: "64Gi" diff --git a/addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml index da70e1c9..56fc31f5 100644 --- a/addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml +++ b/addons/postgresql-cluster/17/plans/standard-32c64g4000/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 64 max_parallel_workers: 32 max_connections: 2000 - max_slot_wal_keep_size: '200 GB' + max_slot_wal_keep_size: "200 GB" postgresql: config: |- @@ -58,12 +58,12 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 100GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: @@ -79,4 +79,4 @@ persistentVolume: size: 4Ti shmVolume: - sizeLimit: "32Gi" \ No newline at end of file + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml index 6c91f192..83ac2774 100644 --- a/addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml +++ b/addons/postgresql-cluster/17/plans/standard-4c16g100/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 8 max_parallel_workers: 4 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -59,18 +59,18 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 10GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: cpu: 4000m - memory: 16Gi hugepages-2Mi: 50Mi + memory: 16Gi requests: cpu: 100m memory: 1Gi @@ -80,4 +80,4 @@ persistentVolume: size: 100Gi shmVolume: - sizeLimit: "8Gi" \ No newline at end of file + sizeLimit: "8Gi" diff --git a/addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml index dee10714..297ca6ae 100644 --- a/addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml +++ b/addons/postgresql-cluster/17/plans/standard-8c32g200/values.yaml @@ -7,7 +7,7 @@ patroni: max_worker_processes: 16 max_parallel_workers: 8 max_connections: 2000 - max_slot_wal_keep_size: '10 GB' + max_slot_wal_keep_size: "10 GB" postgresql: config: |- @@ -58,18 +58,18 @@ postgresql: # Advanced features enable_partitionwise_join = on enable_partitionwise_aggregate = on - jit = on + jit = off max_slot_wal_keep_size = 20GB track_wal_io_timing = on maintenance_io_concurrency = 100 -resources: +resources: # If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
limits: cpu: 8000m - memory: 32Gi hugepages-2Mi: 60Mi + memory: 32Gi requests: cpu: 100m memory: 1Gi @@ -79,4 +79,4 @@ persistentVolume: size: 200Gi shmVolume: - sizeLimit: "16Gi" \ No newline at end of file + sizeLimit: "16Gi" From 9925d76bc2037bc374c271e20a726c925f2ae00e Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 25 Nov 2025 17:04:18 +0800 Subject: [PATCH 79/93] chore(redis): redis proxy password --- .../redis/7.0/chart/redis/templates/sentinel/statefulset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/redis/7.0/chart/redis/templates/sentinel/statefulset.yaml b/addons/redis/7.0/chart/redis/templates/sentinel/statefulset.yaml index 1ad76950..eace1793 100644 --- a/addons/redis/7.0/chart/redis/templates/sentinel/statefulset.yaml +++ b/addons/redis/7.0/chart/redis/templates/sentinel/statefulset.yaml @@ -465,7 +465,7 @@ spec: -listen :{{ .Values.proxy.containerPorts.proxy }} \ -master {{ .Values.sentinel.masterSet }} \ -sentinel-addr ${POD_IP}:{{ .Values.sentinel.containerPorts.sentinel }} \ - -sentinel-pass $(REDIS_PASSWORD) \ + -sentinel-pass "$(REDIS_PASSWORD)" \ -sentinel-user "" \ -max-procs={{ .Values.proxy.maxProcs }} {{- end }} From 0088c48997d75f748868a0cc97e8006eef36b483 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Tue, 16 Dec 2025 14:04:04 +0800 Subject: [PATCH 80/93] feat(addons): add etcd --- addons/etcd/3.6/chart/etcd-3.6/CHANGELOG.md | 1877 +++++++++++++++++ addons/etcd/3.6/chart/etcd-3.6/Chart.yaml | 35 + addons/etcd/3.6/chart/etcd-3.6/README.md | 899 ++++++++ .../3.6/chart/etcd-3.6/templates/NOTES.txt | 121 ++ .../3.6/chart/etcd-3.6/templates/_helpers.tpl | 213 ++ .../chart/etcd-3.6/templates/configmap.yaml | 20 + .../etcd-3.6/templates/cronjob-defrag.yaml | 169 ++ .../templates/cronjob-snapshotter.yaml | 171 ++ .../chart/etcd-3.6/templates/extra-list.yaml | 9 + .../etcd-3.6/templates/networkpolicy.yaml | 100 + .../3.6/chart/etcd-3.6/templates/pdb.yaml | 28 + .../chart/etcd-3.6/templates/podmonitor.yaml | 46 + 
.../templates/preupgrade-hook-job.yaml | 208 ++ .../etcd-3.6/templates/prometheusrule.yaml | 24 + .../3.6/chart/etcd-3.6/templates/secrets.yaml | 20 + .../etcd-3.6/templates/serviceaccount.yaml | 19 + .../etcd-3.6/templates/snapshot-pvc.yaml | 25 + .../chart/etcd-3.6/templates/statefulset.yaml | 470 +++++ .../etcd-3.6/templates/svc-headless.yaml | 57 + .../3.6/chart/etcd-3.6/templates/svc.yaml | 77 + .../etcd-3.6/templates/token-secrets.yaml | 19 + addons/etcd/3.6/chart/etcd-3.6/values.yaml | 1275 +++++++++++ addons/etcd/3.6/meta.yaml | 63 + .../3.6/plans/standard-16c32g3w/bind.yaml | 42 + .../standard-16c32g3w/instance-schema.json | 12 + .../3.6/plans/standard-16c32g3w/meta.yaml | 6 + .../3.6/plans/standard-16c32g3w/values.yaml | 48 + .../etcd/3.6/plans/standard-1c2g3w/bind.yaml | 42 + .../standard-1c2g3w/instance-schema.json | 12 + .../etcd/3.6/plans/standard-1c2g3w/meta.yaml | 6 + .../3.6/plans/standard-1c2g3w/values.yaml | 48 + .../etcd/3.6/plans/standard-2c4g3w/bind.yaml | 42 + .../standard-2c4g3w/instance-schema.json | 12 + .../etcd/3.6/plans/standard-2c4g3w/meta.yaml | 6 + .../3.6/plans/standard-2c4g3w/values.yaml | 48 + .../etcd/3.6/plans/standard-4c8g3w/bind.yaml | 42 + .../standard-4c8g3w/instance-schema.json | 12 + .../etcd/3.6/plans/standard-4c8g3w/meta.yaml | 6 + .../3.6/plans/standard-4c8g3w/values.yaml | 48 + .../etcd/3.6/plans/standard-8c16g3w/bind.yaml | 42 + .../standard-8c16g3w/instance-schema.json | 12 + .../etcd/3.6/plans/standard-8c16g3w/meta.yaml | 6 + .../3.6/plans/standard-8c16g3w/values.yaml | 48 + addons/index.yaml | 3 + 44 files changed, 6488 insertions(+) create mode 100644 addons/etcd/3.6/chart/etcd-3.6/CHANGELOG.md create mode 100644 addons/etcd/3.6/chart/etcd-3.6/Chart.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/README.md create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/NOTES.txt create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/_helpers.tpl create mode 100644 
addons/etcd/3.6/chart/etcd-3.6/templates/configmap.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-defrag.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-snapshotter.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/extra-list.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/networkpolicy.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/pdb.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/podmonitor.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/preupgrade-hook-job.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/prometheusrule.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/secrets.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/serviceaccount.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/snapshot-pvc.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/statefulset.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/svc-headless.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/svc.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/templates/token-secrets.yaml create mode 100644 addons/etcd/3.6/chart/etcd-3.6/values.yaml create mode 100644 addons/etcd/3.6/meta.yaml create mode 100644 addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml create mode 100644 addons/etcd/3.6/plans/standard-16c32g3w/instance-schema.json create mode 100644 addons/etcd/3.6/plans/standard-16c32g3w/meta.yaml create mode 100644 addons/etcd/3.6/plans/standard-16c32g3w/values.yaml create mode 100644 addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml create mode 100644 addons/etcd/3.6/plans/standard-1c2g3w/instance-schema.json create mode 100644 addons/etcd/3.6/plans/standard-1c2g3w/meta.yaml create mode 100644 addons/etcd/3.6/plans/standard-1c2g3w/values.yaml create mode 100644 addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml create mode 100644 
addons/etcd/3.6/plans/standard-2c4g3w/instance-schema.json create mode 100644 addons/etcd/3.6/plans/standard-2c4g3w/meta.yaml create mode 100644 addons/etcd/3.6/plans/standard-2c4g3w/values.yaml create mode 100644 addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml create mode 100644 addons/etcd/3.6/plans/standard-4c8g3w/instance-schema.json create mode 100644 addons/etcd/3.6/plans/standard-4c8g3w/meta.yaml create mode 100644 addons/etcd/3.6/plans/standard-4c8g3w/values.yaml create mode 100644 addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml create mode 100644 addons/etcd/3.6/plans/standard-8c16g3w/instance-schema.json create mode 100644 addons/etcd/3.6/plans/standard-8c16g3w/meta.yaml create mode 100644 addons/etcd/3.6/plans/standard-8c16g3w/values.yaml diff --git a/addons/etcd/3.6/chart/etcd-3.6/CHANGELOG.md b/addons/etcd/3.6/chart/etcd-3.6/CHANGELOG.md new file mode 100644 index 00000000..821003a8 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/CHANGELOG.md @@ -0,0 +1,1877 @@ +# Changelog + +## 12.0.18 (2025-08-14) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references ([#35870](https://github.com/bitnami/charts/pull/35870)) + +## 12.0.17 (2025-08-07) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#35604) ([ddedccc](https://github.com/bitnami/charts/commit/ddedccca22495fb8e688a692f096419a80218d57)), closes [#35604](https://github.com/bitnami/charts/issues/35604) + +## 12.0.16 (2025-08-07) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#35560) ([927680c](https://github.com/bitnami/charts/commit/927680c92928e98e7ea4d409d6e87726be9bed06)), closes [#35560](https://github.com/bitnami/charts/issues/35560) + +## 12.0.15 (2025-08-07) + +* [bitnami/*] docs: update BSI warning on charts' notes (#35340) ([07483a5](https://github.com/bitnami/charts/commit/07483a5ed964b409266dc025e4b55bf2eb0f621c)), closes [#35340](https://github.com/bitnami/charts/issues/35340) +* [bitnami/etcd] :zap: :arrow_up: Update dependency references 
(#35468) ([d4a99d5](https://github.com/bitnami/charts/commit/d4a99d5a500158023662829d1990e4a01c3e5d1f)), closes [#35468](https://github.com/bitnami/charts/issues/35468) + +## 12.0.14 (2025-07-25) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#35303) ([f56cd4a](https://github.com/bitnami/charts/commit/f56cd4aafd2ce67870d406d6cbe02ac4f7337355)), closes [#35303](https://github.com/bitnami/charts/issues/35303) + +## 12.0.13 (2025-07-24) + +* [bitnami/etcd] feat: introduce preUpgrade job delay (#34973) ([36b2825](https://github.com/bitnami/charts/commit/36b2825bb4f8cc66c70b656dac06859651c79c83)), closes [#34973](https://github.com/bitnami/charts/issues/34973) + +## 12.0.12 (2025-07-22) + +* [bitnami/*] Adapt main README and change ascii (#35173) ([73d15e0](https://github.com/bitnami/charts/commit/73d15e03e04647efa902a1d14a09ea8657429cd0)), closes [#35173](https://github.com/bitnami/charts/issues/35173) +* [bitnami/*] Adapt welcome message to BSI (#35170) ([e1c8146](https://github.com/bitnami/charts/commit/e1c8146831516fb35de736a6f3fd10e5e7a44286)), closes [#35170](https://github.com/bitnami/charts/issues/35170) +* [bitnami/*] Add BSI to charts' READMEs (#35174) ([4973fd0](https://github.com/bitnami/charts/commit/4973fd08dd7e95398ddcc4054538023b542e19f2)), closes [#35174](https://github.com/bitnami/charts/issues/35174) +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#35249) ([f4dda1b](https://github.com/bitnami/charts/commit/f4dda1bab2cf8636652c68d4b9441c91c40c5c26)), closes [#35249](https://github.com/bitnami/charts/issues/35249) + +## 12.0.11 (2025-07-16) + +* [bitnami/etcd] Always use initial cluster token (#35160) ([3d49e70](https://github.com/bitnami/charts/commit/3d49e70015e2dd434b4e77643b1dc530803a6ef9)), closes [#35160](https://github.com/bitnami/charts/issues/35160) + +## 12.0.10 (2025-07-15) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#35084) 
([6e98277](https://github.com/bitnami/charts/commit/6e982774426942dd86fc4375f3968b7a37e68fee)), closes [#35084](https://github.com/bitnami/charts/issues/35084) + +## 12.0.9 (2025-07-10) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#34997) ([45ad7d6](https://github.com/bitnami/charts/commit/45ad7d61d7261a51f433d6420549ea982b9d8c9f)), closes [#34997](https://github.com/bitnami/charts/issues/34997) + +## 12.0.8 (2025-07-08) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#34881) ([6d91652](https://github.com/bitnami/charts/commit/6d916522b51a18123e5c70705996d37549820bb0)), closes [#34881](https://github.com/bitnami/charts/issues/34881) + +## 12.0.7 (2025-07-03) + +* [bitnami/etcd] Fix protocol selection in defrag job (#34767) ([d6916ae](https://github.com/bitnami/charts/commit/d6916aef81a368351747137f8107fe6add7e3e93)), closes [#34767](https://github.com/bitnami/charts/issues/34767) + +## 12.0.6 (2025-06-17) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#34427) ([9cf5b43](https://github.com/bitnami/charts/commit/9cf5b43efaf70d8e0490b3e0af08a11276cc528b)), closes [#34427](https://github.com/bitnami/charts/issues/34427) + +## 12.0.5 (2025-06-16) + +* [bitnami/etcd] Add values for etcd preUpgradeJob scheduling parameters (#34367) ([a68568b](https://github.com/bitnami/charts/commit/a68568bf913f20198f62f1c7435ebc2bf4f47670)), closes [#34367](https://github.com/bitnami/charts/issues/34367) + +## 12.0.4 (2025-06-11) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#34350) ([fd5b5f5](https://github.com/bitnami/charts/commit/fd5b5f563fb0baeab3b69bd23cbac99040831d2d)), closes [#34350](https://github.com/bitnami/charts/issues/34350) + +## 12.0.3 (2025-06-06) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#34244) ([3359be4](https://github.com/bitnami/charts/commit/3359be42c419564ea972adf895828ce3da92052f)), closes [#34244](https://github.com/bitnami/charts/issues/34244) + +## 
12.0.2 (2025-06-05) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#34152) ([321b964](https://github.com/bitnami/charts/commit/321b96464c71b78121e60b99c08d69eadfad9024)), closes [#34152](https://github.com/bitnami/charts/issues/34152) + +## 12.0.1 (2025-06-03) + +* [etcd] add priorityClass to preupgrade-hook-job. (#34035) ([320d606](https://github.com/bitnami/charts/commit/320d6068c96f78f18e99446f3214afec6cffc3c6)), closes [#34035](https://github.com/bitnami/charts/issues/34035) + +## 12.0.0 (2025-05-23) + +* [bitnami/etcd] :zap: :arrow_up: Update dependency references (#33840) ([f1bf7cb](https://github.com/bitnami/charts/commit/f1bf7cb2bbc239632a46fb04db7b6e3c5ccf6efc)), closes [#33840](https://github.com/bitnami/charts/issues/33840) + +## 11.3.6 (2025-05-19) + +* [bitnami/etcd] Allow release namespace to be overridden (#33550) ([b14c079](https://github.com/bitnami/charts/commit/b14c0794110027fb4198f9529c1b43ae34213d9c)), closes [#33550](https://github.com/bitnami/charts/issues/33550) + +## 11.3.5 (2025-05-16) + +* [bitnami/etcd] fix type of GOMAXPROCS in envs (#33477) ([0a6e907](https://github.com/bitnami/charts/commit/0a6e907ab91b4ed81541b6243d981289deee636d)), closes [#33477](https://github.com/bitnami/charts/issues/33477) +* [bitnami/kubeapps] Deprecation followup (#33579) ([77e312c](https://github.com/bitnami/charts/commit/77e312c1772d4d7c4dc5d3ac0e80f4e452e3a062)), closes [#33579](https://github.com/bitnami/charts/issues/33579) + +## 11.3.4 (2025-05-08) + +* [bitnami/etcd]fix(cronjob-defrag):Remove JWT restrictions (#33321) ([02d7b94](https://github.com/bitnami/charts/commit/02d7b94f68d17fc6ebdc9f7431510b6d6d12f444)), closes [#33321](https://github.com/bitnami/charts/issues/33321) + +## 11.3.3 (2025-05-07) + +* [bitnami/etcd] Release 11.3.3 (#33511) ([92517f3](https://github.com/bitnami/charts/commit/92517f3d191313e140bbee83b5fd5eb22af786a9)), closes [#33511](https://github.com/bitnami/charts/issues/33511) + +## 11.3.2 (2025-05-07) + +* 
[bitnami/etcd] chore: :recycle: :arrow_up: Update common and remove k8s < 1.23 references (#33358) ([c7f00d1](https://github.com/bitnami/charts/commit/c7f00d1ce5daeb4a2152139724308da61fc47bb8)), closes [#33358](https://github.com/bitnami/charts/issues/33358) + +## 11.3.1 (2025-05-01) + +* [bitnami/etcd] Release 11.3.1 (#33289) ([831c6a5](https://github.com/bitnami/charts/commit/831c6a597f8d157f8e7e25e3d52872860fc49691)), closes [#33289](https://github.com/bitnami/charts/issues/33289) + +## 11.3.0 (2025-04-23) + +* [bitnami/etcd] Add possibility to pass extra env vars to the defrag cronjob (#32953) ([9df94d0](https://github.com/bitnami/charts/commit/9df94d083dbc03eb22202dbbc691fd687827ac66)), closes [#32953](https://github.com/bitnami/charts/issues/32953) + +## 11.2.4 (2025-04-11) + +* [bitnami/etcd] Simplify condition (#32814) ([4b9196b](https://github.com/bitnami/charts/commit/4b9196b591ac921706c23dc47512f54d2f374beb)), closes [#32814](https://github.com/bitnami/charts/issues/32814) + +## 11.2.3 (2025-04-09) + +* [bitnami/etcd] fix livenessprobe when metrics.useSeparateEndpoint is set (#32870) ([0f3c1cc](https://github.com/bitnami/charts/commit/0f3c1ccdeefe6e244c9ff9577e06b6c7e9b4bb15)), closes [#32870](https://github.com/bitnami/charts/issues/32870) + +## 11.2.2 (2025-04-01) + +* [bitnami/etcd] Release 11.2.2 (#32727) ([b34eff7](https://github.com/bitnami/charts/commit/b34eff75291633a39b02ed46c27c60250a07d3f9)), closes [#32727](https://github.com/bitnami/charts/issues/32727) + +## 11.2.1 (2025-03-28) + +* [bitnami/etcd] Release 11.2.1 (#32654) ([5822809](https://github.com/bitnami/charts/commit/5822809df8949d943bcde1d8f6521a2484a8b5cc)), closes [#32654](https://github.com/bitnami/charts/issues/32654) + +## 11.2.0 (2025-03-27) + +* [bitnami/etcd] Set `usePasswordFiles=true` by default (#32345) ([c46b195](https://github.com/bitnami/charts/commit/c46b195f624cdcc487dc503fed040bf80278b615)), closes [#32345](https://github.com/bitnami/charts/issues/32345) + +## 11.1.6 
(2025-03-21) + +* [bitnami/etcd] Release 11.1.6 (#32555) ([5c90087](https://github.com/bitnami/charts/commit/5c900872e5d10675b56c53046cfca275cb01e601)), closes [#32555](https://github.com/bitnami/charts/issues/32555) + +## 11.1.5 (2025-03-12) + +* [bitnami/*] Add tanzuCategory annotation (#32409) ([a8fba5c](https://github.com/bitnami/charts/commit/a8fba5cb01f6f4464ca7f69c50b0fbe97d837a95)), closes [#32409](https://github.com/bitnami/charts/issues/32409) +* [bitnami/etcd] bugfix: skip TLS verification with self-signed certs (#32417) ([2ca96b6](https://github.com/bitnami/charts/commit/2ca96b6a2b019eb838e352dd98142bf20d443990)), closes [#32417](https://github.com/bitnami/charts/issues/32417) + +## 11.1.4 (2025-03-11) + +* [bitnami/etcd] Release 11.1.4 (#32397) ([0ce5db9](https://github.com/bitnami/charts/commit/0ce5db9b0e37162cec32789580d4892264783a88)), closes [#32397](https://github.com/bitnami/charts/issues/32397) + +## 11.1.3 (2025-03-05) + +* [bitnami/etcd] Release 11.1.3 (#32330) ([87e001e](https://github.com/bitnami/charts/commit/87e001e707c4abe8f368e0d151c5b73f6870b81a)), closes [#32330](https://github.com/bitnami/charts/issues/32330) + +## 11.1.2 (2025-03-05) + +* [bitnami/etcd] Release 11.1.2 (#32286) ([447bd72](https://github.com/bitnami/charts/commit/447bd72be474be3ddacc90c4dc52c80a66254d2f)), closes [#32286](https://github.com/bitnami/charts/issues/32286) + +## 11.1.1 (2025-03-04) + +* [bitnami/etcd] bugfix: probes to use healthcheck when TLS client-to-server authentication (#32258) ([a6791c5](https://github.com/bitnami/charts/commit/a6791c5ee6b83f963406a8b046d424949269e06c)), closes [#32258](https://github.com/bitnami/charts/issues/32258) + +## 11.1.0 (2025-02-24) + +* [bitnami/etcd] feat: basic customization params for etcd upgrade-job (#32099) ([b820c10](https://github.com/bitnami/charts/commit/b820c10df5a2066e25ef101f1a279819a83c238b)), closes [#32099](https://github.com/bitnami/charts/issues/32099) + +## 11.0.8 (2025-02-19) + +* [bitnami/etcd] 
Release 11.0.8 (#31993) ([d854fdd](https://github.com/bitnami/charts/commit/d854fdd029c002ec6591bd948a1c69a5c5bdb2c4)), closes [#31993](https://github.com/bitnami/charts/issues/31993) + +## 11.0.7 (2025-02-12) + +* [bitnami/*] Use CDN url for the Bitnami Application Icons (#31881) ([d9bb11a](https://github.com/bitnami/charts/commit/d9bb11a9076b9bfdcc70ea022c25ef50e9713657)), closes [#31881](https://github.com/bitnami/charts/issues/31881) +* [bitnami/etcd] Release 11.0.7 (#31886) ([04e29ce](https://github.com/bitnami/charts/commit/04e29ce87c72cb596b1e504e3ee89ebf2a786f98)), closes [#31886](https://github.com/bitnami/charts/issues/31886) + +## 11.0.6 (2025-02-04) + +* [bitnami/etcd] Release 11.0.6 (#31750) ([a99c4ae](https://github.com/bitnami/charts/commit/a99c4ae6f11749ed920bcc9fd05b81f7ebdf0f6b)), closes [#31750](https://github.com/bitnami/charts/issues/31750) + +## 11.0.5 (2025-01-31) + +* [bitnami/etcd] Fix: issue with preupgrade job during scale down (#31539) ([36f16bf](https://github.com/bitnami/charts/commit/36f16bf90cc1461000a8f73341e0b770be0b6413)), closes [#31539](https://github.com/bitnami/charts/issues/31539) +* Update copyright year (#31682) ([e9f02f5](https://github.com/bitnami/charts/commit/e9f02f5007068751f7eb2270fece811e685c99b6)), closes [#31682](https://github.com/bitnami/charts/issues/31682) + +## 11.0.4 (2025-01-24) + +* [bitnami/etcd] Release 11.0.4 (#31591) ([b342ec8](https://github.com/bitnami/charts/commit/b342ec844c9106fb724ec952fa4abbe7f26844d1)), closes [#31591](https://github.com/bitnami/charts/issues/31591) + +## 11.0.3 (2025-01-24) + +* [bitnami/etcd] Release 11.0.3 (#31549) ([a3b65e5](https://github.com/bitnami/charts/commit/a3b65e57e2030811813cfcd132c94efa45f57eb5)), closes [#31549](https://github.com/bitnami/charts/issues/31549) + +## 11.0.2 (2025-01-23) + +* [bitnami/etcd] fix for PreUpgrade Job Issue (#31524) ([3bf5de8](https://github.com/bitnami/charts/commit/3bf5de8c881d945a4542784a3628fd8fd7c2f668)), closes 
[#31524](https://github.com/bitnami/charts/issues/31524) + +## 11.0.1 (2025-01-20) + +* [bitnami/etcd] fixed tls enable handling and v3 environment variable support for defrag cronjob (#31 ([c3c3e43](https://github.com/bitnami/charts/commit/c3c3e4327129db9fa1a88ee7a79227647404d63f)), closes [#31270](https://github.com/bitnami/charts/issues/31270) + +## 11.0.0 (2025-01-20) + +* [bitnami/etcd] Add pre-upgrade hook (#31161) ([38be6c5](https://github.com/bitnami/charts/commit/38be6c5dcd4ed7bff0c830d8eefe98962310c54d)), closes [#31161](https://github.com/bitnami/charts/issues/31161) + +## 10.7.3 (2025-01-17) + +* [bitnami/etcd] Release 10.7.3 (#31419) ([7ef8d56](https://github.com/bitnami/charts/commit/7ef8d562ec9953460d675b02cfbce336ab66bceb)), closes [#31419](https://github.com/bitnami/charts/issues/31419) + +## 10.7.2 (2025-01-12) + +* [bitnami/*] Fix typo in README (#31052) ([b41a51d](https://github.com/bitnami/charts/commit/b41a51d1bd04841fc108b78d3b8357a5292771c8)), closes [#31052](https://github.com/bitnami/charts/issues/31052) +* [bitnami/etcd] Release 10.7.2 (#31312) ([0e47c54](https://github.com/bitnami/charts/commit/0e47c5464e650b9149a87fbeac84f5a497012f36)), closes [#31312](https://github.com/bitnami/charts/issues/31312) + +## 10.7.1 (2024-12-11) + +* [bitnami/etcd] keep snapshot-pvc (#30815) ([2a63958](https://github.com/bitnami/charts/commit/2a639583f613184bb1e05346ba25169d872db3cf)), closes [#30815](https://github.com/bitnami/charts/issues/30815) + +## 10.7.0 (2024-12-10) + +* [bitnami/*] Add Bitnami Premium to NOTES.txt (#30854) ([3dfc003](https://github.com/bitnami/charts/commit/3dfc00376df6631f0ce54b8d440d477f6caa6186)), closes [#30854](https://github.com/bitnami/charts/issues/30854) +* [bitnami/etcd] Detect non-standard images (#30895) ([3e2009f](https://github.com/bitnami/charts/commit/3e2009f4310e4c3402df62e5dcc73d911fa4d0b7)), closes [#30895](https://github.com/bitnami/charts/issues/30895) + +## 10.6.1 (2024-12-04) + +* [bitnami/etcd] Release 
10.6.1 (#30746) ([9d81d81](https://github.com/bitnami/charts/commit/9d81d81ddbf73b9453468972a8c1501879888d32)), closes [#30746](https://github.com/bitnami/charts/issues/30746) + +## 10.6.0 (2024-12-02) + +* [bitnami/*] docs: :memo: Add "Backup & Restore" section (#30711) ([35ab536](https://github.com/bitnami/charts/commit/35ab5363741e7548f4076f04da6e62d10153c60c)), closes [#30711](https://github.com/bitnami/charts/issues/30711) +* [bitnami/*] docs: :memo: Add "Prometheus metrics" (batch 2) (#30662) ([50e0570](https://github.com/bitnami/charts/commit/50e0570f98ab15308af7910b405baa4480e5fe3f)), closes [#30662](https://github.com/bitnami/charts/issues/30662) +* [bitnami/*] docs: :memo: Add "Update Credentials" (batch 1) (#30685) ([be6aa1d](https://github.com/bitnami/charts/commit/be6aa1df0bd4479173a78400fef7295de15b408d)), closes [#30685](https://github.com/bitnami/charts/issues/30685) +* [bitnami/etcd] Add loadBalancerClass for Etcd chart (#30697) ([7ee741a](https://github.com/bitnami/charts/commit/7ee741ab42e6d1f22c7f3dca386542d7be801dac)), closes [#30697](https://github.com/bitnami/charts/issues/30697) + +## 10.5.3 (2024-11-12) + +* [bitnami/etcd] Release 10.5.3 (#30432) ([5f6b173](https://github.com/bitnami/charts/commit/5f6b173360444a3d78b365f7346c373338db5a90)), closes [#30432](https://github.com/bitnami/charts/issues/30432) + +## 10.5.2 (2024-11-07) + +* [bitnami/etcd] Release 10.5.2 (#30261) ([6944f1e](https://github.com/bitnami/charts/commit/6944f1e3dc10cfaba00e5fd7845a7ed8ae3ad9a5)), closes [#30261](https://github.com/bitnami/charts/issues/30261) + +## 10.5.1 (2024-11-06) + +* [bitnami/etcd]: Fix volume indentation for defrag cronjob (#30192) ([5ef34f9](https://github.com/bitnami/charts/commit/5ef34f968b33e72a065ac6ad1249a1828a5744fd)), closes [#30192](https://github.com/bitnami/charts/issues/30192) + +## 10.5.0 (2024-11-05) + +* [bitnami/*] Remove wrong comment about imagePullPolicy (#30107) 
([a51f9e4](https://github.com/bitnami/charts/commit/a51f9e4bb0fbf77199512d35de7ac8abe055d026)), closes [#30107](https://github.com/bitnami/charts/issues/30107) +* [bitnami/etcd]: Fix Defrag "bug" and prevent CronJob templates ambiguity (#30077) ([15e3fea](https://github.com/bitnami/charts/commit/15e3feae76b32ac9c6af4588c7b12a14cb76c3b2)), closes [#30077](https://github.com/bitnami/charts/issues/30077) [#30053](https://github.com/bitnami/charts/issues/30053) [#30053](https://github.com/bitnami/charts/issues/30053) + +## 10.4.2 (2024-10-28) + +* [bitnami/etcd] Fixing defrag etcd for authenticated and tls clusters (#30096) ([558e971](https://github.com/bitnami/charts/commit/558e971fd3951ea349cb14739920d3c948f84edb)), closes [#30096](https://github.com/bitnami/charts/issues/30096) + +## 10.4.1 (2024-10-24) + +* [bitnami/etcd]: Fix podLabels bug (#30052) ([9a5b58e](https://github.com/bitnami/charts/commit/9a5b58eff6fadde572f596f387ee921259a80469)), closes [#30052](https://github.com/bitnami/charts/issues/30052) + +## 10.4.0 (2024-10-22) + +* [bitnami/etcd]: Automatic etcd defragmentation (#29967) ([ea1683d](https://github.com/bitnami/charts/commit/ea1683dabaf7448a1e2fc6eca0a37a70899270e2)), closes [#29967](https://github.com/bitnami/charts/issues/29967) +* Update documentation links to techdocs.broadcom.com (#29931) ([f0d9ad7](https://github.com/bitnami/charts/commit/f0d9ad78f39f633d275fc576d32eae78ded4d0b8)), closes [#29931](https://github.com/bitnami/charts/issues/29931) + +## 10.3.1 (2024-10-16) + +* [bitnami/etcd] fix etcd cronjob volume (#29895) ([64aad9b](https://github.com/bitnami/charts/commit/64aad9b12ec2e7f48636944a4cc9d180b5c761a4)), closes [#29895](https://github.com/bitnami/charts/issues/29895) + +## 10.3.0 (2024-10-14) + +* [bitnami/etcd]: Allow overriding of snapshot command (#29873) ([5b8af4f](https://github.com/bitnami/charts/commit/5b8af4f5b340ef784955b1714741ec0f33051129)), closes [#29873](https://github.com/bitnami/charts/issues/29873) + +## 10.2.19 
(2024-10-02) + +* [bitnami/etcd] Release 10.2.19 (#29689) ([624fa30](https://github.com/bitnami/charts/commit/624fa30d7eaaf67557a9dd6ba3393ced2419256b)), closes [#29689](https://github.com/bitnami/charts/issues/29689) + +## 10.2.18 (2024-09-23) + +* [bitnami/etcd] Release 10.2.18 (#29567) ([4c79d16](https://github.com/bitnami/charts/commit/4c79d16b6a975868603490473c50dc709ca51b9e)), closes [#29567](https://github.com/bitnami/charts/issues/29567) + +## 10.2.17 (2024-09-17) + +* [bitnami/etcd] test: :white_check_mark: Improve reliability of ginkgo tests (#29476) ([03fc421](https://github.com/bitnami/charts/commit/03fc4212a65b2af824cc32e7d4b19dd48686b947)), closes [#29476](https://github.com/bitnami/charts/issues/29476) + +## 10.2.16 (2024-09-12) + +* [bitnami/etcd] Release 10.2.16 (#29380) ([250df75](https://github.com/bitnami/charts/commit/250df756b74ba578a9e4521c2a958e3860ec1166)), closes [#29380](https://github.com/bitnami/charts/issues/29380) + +## 10.2.15 (2024-09-12) + +* [bitnami/etcd] Release 10.2.15 (#29373) ([be542d6](https://github.com/bitnami/charts/commit/be542d607f92d6efbc7e32331199354d21d46384)), closes [#29373](https://github.com/bitnami/charts/issues/29373) + +## 10.2.14 (2024-09-05) + +* [bitnami/etcd] Release 10.2.14 (#29233) ([72a3026](https://github.com/bitnami/charts/commit/72a30265033f92f4b1e2d357948140ffd5ab4751)), closes [#29233](https://github.com/bitnami/charts/issues/29233) + +## 10.2.13 (2024-08-27) + +* [bitnami/etcd] Release 10.2.13 (#29048) ([14a9538](https://github.com/bitnami/charts/commit/14a9538b0d55f496b5ce4387307b1fa5c0bc2378)), closes [#29048](https://github.com/bitnami/charts/issues/29048) + +## 10.2.12 (2024-08-07) + +* [bitnami/etcd] Release 10.2.12 (#28712) ([4a5fc9e](https://github.com/bitnami/charts/commit/4a5fc9ee1b8d34ea953c3c5f9135297611801859)), closes [#28712](https://github.com/bitnami/charts/issues/28712) + +## 10.2.11 (2024-07-25) + +* [bitnami/etcd] Release 10.2.11 (#28404) 
([993529a](https://github.com/bitnami/charts/commit/993529a5f95e8d5126108f40fc1cb0b07cc04526)), closes [#28404](https://github.com/bitnami/charts/issues/28404) + +## 10.2.10 (2024-07-24) + +* [bitnami/etcd] Release 10.2.10 (#28309) ([c247f40](https://github.com/bitnami/charts/commit/c247f40778e1183d69e7956d39a277f425600d61)), closes [#28309](https://github.com/bitnami/charts/issues/28309) + +## 10.2.9 (2024-07-24) + +* [bitnami/etcd] Release 10.2.9 (#28240) ([9f7651d](https://github.com/bitnami/charts/commit/9f7651d9e6790103a6ece29d28aaa5c3e3c5b944)), closes [#28240](https://github.com/bitnami/charts/issues/28240) + +## 10.2.8 (2024-07-23) + +* [bitnami/etcd] Release 10.2.8 (#28218) ([b4cc3af](https://github.com/bitnami/charts/commit/b4cc3af8434acad96153e3e35bffd80dfab7f3c8)), closes [#28218](https://github.com/bitnami/charts/issues/28218) + +## 10.2.7 (2024-07-16) + +* [bitnami/etcd] Global StorageClass as default value (#28016) ([983fdc1](https://github.com/bitnami/charts/commit/983fdc186ab87520e8d06ff02a8bbbcb817edb82)), closes [#28016](https://github.com/bitnami/charts/issues/28016) + +## 10.2.6 (2024-07-04) + +* [bitnami/etcd] Release 10.2.6 (#27765) ([7576e0c](https://github.com/bitnami/charts/commit/7576e0c6f828ce1ac3fdce780764a6f311a9a773)), closes [#27765](https://github.com/bitnami/charts/issues/27765) + +## 10.2.5 (2024-07-03) + +* [bitnami/*] Update README changing TAC wording (#27530) ([52dfed6](https://github.com/bitnami/charts/commit/52dfed6bac44d791efabfaf06f15daddc4fefb0c)), closes [#27530](https://github.com/bitnami/charts/issues/27530) +* [bitnami/etcd] Release 10.2.5 (#27646) ([41a7444](https://github.com/bitnami/charts/commit/41a74443e1abe22ae4e81aca14dbe52127c6aae1)), closes [#27646](https://github.com/bitnami/charts/issues/27646) + +## 10.2.4 (2024-06-18) + +* [bitnami/etcd] Release 10.2.4 (#27342) ([ddd960c](https://github.com/bitnami/charts/commit/ddd960cd73fedd20e4f5411c9a30d7d3be56e6ab)), closes 
[#27342](https://github.com/bitnami/charts/issues/27342) + +## 10.2.3 (2024-06-17) + +* [bitnami/etcd] Release 10.2.3 (#27215) ([a37f7f3](https://github.com/bitnami/charts/commit/a37f7f3f028c05b7da745f8720e9daa69d691e01)), closes [#27215](https://github.com/bitnami/charts/issues/27215) + +## 10.2.2 (2024-06-06) + +* [bitnami/etcd] Release 10.2.2 (#26951) ([8c377b6](https://github.com/bitnami/charts/commit/8c377b62e3cb7346cd639c7455e2b86e0336a931)), closes [#26951](https://github.com/bitnami/charts/issues/26951) + +## 10.2.1 (2024-06-06) + +* [bitnami/etcd] Align PodDisruptionBudgets with templates (#26692) ([5a15485](https://github.com/bitnami/charts/commit/5a154857226db6b76f915dbf837c1cc79cc013a1)), closes [#26692](https://github.com/bitnami/charts/issues/26692) + +## 10.2.0 (2024-06-06) + +* [bitnami/etcd] Non-manual chart upgrades (#25655) ([74000ba](https://github.com/bitnami/charts/commit/74000ba366b23ca6ca4d6f2730bf6d22d9404689)), closes [#25655](https://github.com/bitnami/charts/issues/25655) + +## 10.1.4 (2024-06-05) + +* [bitnami/etcd] Release 10.1.4 (#26723) ([21a4c55](https://github.com/bitnami/charts/commit/21a4c55475238e7e2cccc922b0b5463aba26fefd)), closes [#26723](https://github.com/bitnami/charts/issues/26723) + +## 10.1.3 (2024-06-04) + +* [bitnami/etcd] Bump chart version (#26628) ([86ade26](https://github.com/bitnami/charts/commit/86ade2624082951d3e618a389293618a10e433d4)), closes [#26628](https://github.com/bitnami/charts/issues/26628) + +## 10.1.2 (2024-05-29) + +* [bitnami/etcd] Release 10.1.2 (#26555) ([f646374](https://github.com/bitnami/charts/commit/f6463741bd6d21d0b0330483f4961f0112680b97)), closes [#26555](https://github.com/bitnami/charts/issues/26555) + +## 10.1.1 (2024-05-24) + +* [bitnami/etcd] Fix livenessProbe for ETCD with TLS (#26407) ([4dbf742](https://github.com/bitnami/charts/commit/4dbf74264d1e718328757a07dc4dd4b86ab5f6da)), closes [#26407](https://github.com/bitnami/charts/issues/26407) + +## 10.1.0 (2024-05-21) + +* 
[bitnami/*] ci: :construction_worker: Add tag and changelog support (#25359) ([91c707c](https://github.com/bitnami/charts/commit/91c707c9e4e574725a09505d2d313fb93f1b4c0a)), closes [#25359](https://github.com/bitnami/charts/issues/25359) +* [bitnami/etcd] feat: :sparkles: :lock: Add warning when original images are replaced (#26198) ([dafbc01](https://github.com/bitnami/charts/commit/dafbc012dd0010993ded17cd3056cecfd80245d2)), closes [#26198](https://github.com/bitnami/charts/issues/26198) +* [bitnami/etcd] Use different liveness/readiness probes (#25984) ([a1ea1f7](https://github.com/bitnami/charts/commit/a1ea1f7ae4ef7047e2bb990aadc893b2d8bfd0c0)), closes [#25984](https://github.com/bitnami/charts/issues/25984) + +## 10.0.11 (2024-05-18) + +* [bitnami/etcd] Release 10.0.11 updating components versions (#26011) ([27a7d6c](https://github.com/bitnami/charts/commit/27a7d6cb9de0740655723ee8cc02b9be15086351)), closes [#26011](https://github.com/bitnami/charts/issues/26011) + +## 10.0.10 (2024-05-17) + +* [bitnami/etcd] Revert #25896 (#25917) ([8f095b1](https://github.com/bitnami/charts/commit/8f095b11fea7d8f17de85c098f892c56e8c1f5e9)), closes [#25896](https://github.com/bitnami/charts/issues/25896) [#25917](https://github.com/bitnami/charts/issues/25917) +* Fix inconsistent readme and values.yaml for etcd (#25941) ([c289f0e](https://github.com/bitnami/charts/commit/c289f0e8815089f52b77d8977fe8e1793bf19ff5)), closes [#25941](https://github.com/bitnami/charts/issues/25941) + +## 10.0.9 (2024-05-15) + +* [bitnami/etcd] PDB review (#25896) ([5235139](https://github.com/bitnami/charts/commit/52351399e9d9035dbf3a519306d4ee45eb8a8adf)), closes [#25896](https://github.com/bitnami/charts/issues/25896) + +## 10.0.8 (2024-05-13) + +* [bitnami/*] Change non-root and rolling-tags doc URLs (#25628) ([b067c94](https://github.com/bitnami/charts/commit/b067c94f6bcde427863c197fd355f0b5ba12ff5b)), closes [#25628](https://github.com/bitnami/charts/issues/25628) +* [bitnami/etcd] Release 
10.0.8 updating components versions (#25751) ([9824a8b](https://github.com/bitnami/charts/commit/9824a8b0fd0b67c91e7a553e90b5334104308e21)), closes [#25751](https://github.com/bitnami/charts/issues/25751) + +## 10.0.7 (2024-05-08) + +* [bitnami/etcd] Release 10.0.7 updating components versions (#25598) ([36b07ca](https://github.com/bitnami/charts/commit/36b07ca303a193f222b7837bbafedd0c3a972d49)), closes [#25598](https://github.com/bitnami/charts/issues/25598) + +## 10.0.6 (2024-05-07) + +* [bitnami/etcd] Release 10.0.6 updating components versions (#25589) ([cfb62c0](https://github.com/bitnami/charts/commit/cfb62c0bcf7e6d9a7b7c5dabc103d774839b9123)), closes [#25589](https://github.com/bitnami/charts/issues/25589) + +## 10.0.5 (2024-05-07) + +* [bitnami/*] Set new header/owner (#25558) ([8d1dc11](https://github.com/bitnami/charts/commit/8d1dc11f5fb30db6fba50c43d7af59d2f79deed3)), closes [#25558](https://github.com/bitnami/charts/issues/25558) +* [bitnami/etcd] Properly get ROOT_PASSWORD when providing an existing secret (#25574) ([ae696c5](https://github.com/bitnami/charts/commit/ae696c5c2d2fffa3f7de2685776863ed052f5834)), closes [#25574](https://github.com/bitnami/charts/issues/25574) +* Replace VMware by Broadcom copyright text (#25306) ([a5e4bd0](https://github.com/bitnami/charts/commit/a5e4bd0e35e419203793976a78d9d0a13de92c76)), closes [#25306](https://github.com/bitnami/charts/issues/25306) + +## 10.0.4 (2024-04-24) + +* [bitnami/etcd] Release 10.0.4 updating components versions (#25354) ([bc549df](https://github.com/bitnami/charts/commit/bc549dfe2d9a16093f3acba5d5f875547a43cf3d)), closes [#25354](https://github.com/bitnami/charts/issues/25354) + +## 10.0.3 (2024-04-05) + +* [bitnami/etcd] Release 10.0.3 updating components versions (#25008) ([45bb657](https://github.com/bitnami/charts/commit/45bb657c2a5c0b69fc6709325f4dacf8b3ae0786)), closes [#25008](https://github.com/bitnami/charts/issues/25008) + +## 10.0.2 (2024-04-04) + +* [bitnami/etcd] Release 10.0.2 
(#24878) ([c493c44](https://github.com/bitnami/charts/commit/c493c44826eaa343e9924d403f699f1da785f311)), closes [#24878](https://github.com/bitnami/charts/issues/24878) +* Update resourcesPreset comments (#24467) ([92e3e8a](https://github.com/bitnami/charts/commit/92e3e8a507326d2a20a8f10ab3e7746a2ec5c554)), closes [#24467](https://github.com/bitnami/charts/issues/24467) + +## 10.0.1 (2024-03-29) + +* [bitnami/etcd] Release 10.0.1 updating components versions (#24753) ([17a3edb](https://github.com/bitnami/charts/commit/17a3edb41c29c6578ab876f0f1412bdb96cd69e2)), closes [#24753](https://github.com/bitnami/charts/issues/24753) +* Fix linter issues in markdown ([8991e37](https://github.com/bitnami/charts/commit/8991e37e0378cc300da6f42f7c012fd2c5723271)) + +## 10.0.0 (2024-03-18) + +* [bitnami/*] Reorder Chart sections (#24455) ([0cf4048](https://github.com/bitnami/charts/commit/0cf4048e8743f70a9753d460655bd030cbff6824)), closes [#24455](https://github.com/bitnami/charts/issues/24455) +* [bitnami/etcd] feat!: :lock: :boom: Improve security defaults (#24323) ([ef88229](https://github.com/bitnami/charts/commit/ef88229c3046496b1160ab1618d883727baeff56)), closes [#24323](https://github.com/bitnami/charts/issues/24323) + +## 9.15.2 (2024-03-07) + +* [bitnami/etcd] Release 9.15.2 updating components versions (#24248) ([8f5400d](https://github.com/bitnami/charts/commit/8f5400db8b1526c5328fac23d8f9b631cd79e1b6)), closes [#24248](https://github.com/bitnami/charts/issues/24248) + +## 9.15.1 (2024-03-06) + +* [bitnami/etcd] Release 9.15.1 updating components versions (#24194) ([c996abd](https://github.com/bitnami/charts/commit/c996abdee55b8d588da91eed7693cdb5443cf566)), closes [#24194](https://github.com/bitnami/charts/issues/24194) + +## 9.15.0 (2024-03-06) + +* [bitnami/etcd] feat: :sparkles: :lock: Add automatic adaptation for Openshift restricted-v2 SCC (#24 ([038ebc7](https://github.com/bitnami/charts/commit/038ebc7c7ae2770bec76257ee13b53f78e9d8efb)), closes 
[#24079](https://github.com/bitnami/charts/issues/24079) + +## 9.14.3 (2024-02-29) + +* [bitnami/etcd] Release 9.14.3 updating components versions (#23984) ([813259c](https://github.com/bitnami/charts/commit/813259c6c7ce88844b0277d8ad852272837bdb34)), closes [#23984](https://github.com/bitnami/charts/issues/23984) + +## 9.14.2 (2024-02-21) + +* [bitnami/etcd] Release 9.14.2 updating components versions (#23767) ([fc6901e](https://github.com/bitnami/charts/commit/fc6901e32992a59ebdaee1f6739fb89cb7b74d70)), closes [#23767](https://github.com/bitnami/charts/issues/23767) + +## 9.14.1 (2024-02-21) + +* [bitnami/etcd] Release 9.14.1 updating components versions (#23701) ([277354f](https://github.com/bitnami/charts/commit/277354f02021bd0f1851f7dbe8885afeb197e2ad)), closes [#23701](https://github.com/bitnami/charts/issues/23701) + +## 9.14.0 (2024-02-20) + +* [bitnami/etcd] feat: :sparkles: :lock: Add readOnlyRootFilesystem support (#23611) ([045d64e](https://github.com/bitnami/charts/commit/045d64e84fbe9e6318e005f384e6845bde90e205)), closes [#23611](https://github.com/bitnami/charts/issues/23611) + +## 9.13.0 (2024-02-20) + +* [bitnami/*] Bump all versions (#23602) ([b70ee2a](https://github.com/bitnami/charts/commit/b70ee2a30e4dc256bf0ac52928fb2fa7a70f049b)), closes [#23602](https://github.com/bitnami/charts/issues/23602) + +## 9.12.0 (2024-02-16) + +* [bitnami/etcd] feat: :sparkles: :lock: Add resource preset support (#23446) ([c23f6a6](https://github.com/bitnami/charts/commit/c23f6a6d76c13aab141685d8207a9902e0ac4a44)), closes [#23446](https://github.com/bitnami/charts/issues/23446) + +## 9.11.0 (2024-02-07) + +* [bitnami/etcd] feat: :lock: Enable networkPolicy (#23286) ([7cbf9ec](https://github.com/bitnami/charts/commit/7cbf9ec8c9b413821f479ee29ca67d9d0d093571)), closes [#23286](https://github.com/bitnami/charts/issues/23286) + +## 9.10.8 (2024-02-07) + +* [bitnami/etcd] Release 9.10.8 updating components versions (#23298) 
([ec528c3](https://github.com/bitnami/charts/commit/ec528c3e627ca78f8776bd87cc5ad7c628b5cd55)), closes [#23298](https://github.com/bitnami/charts/issues/23298) + +## 9.10.7 (2024-02-07) + +* [bitnami/etcd] Release 9.10.7 updating components versions (#23247) ([2c9e6fb](https://github.com/bitnami/charts/commit/2c9e6fbdc20d1e025073671b3ee7c3108b044a9e)), closes [#23247](https://github.com/bitnami/charts/issues/23247) + +## 9.10.6 (2024-02-02) + +* [bitnami/etcd] Release 9.10.6 updating components versions (#23068) ([e4f2d5b](https://github.com/bitnami/charts/commit/e4f2d5b60efbd3a1cef4a9117aaff36f50274bd7)), closes [#23068](https://github.com/bitnami/charts/issues/23068) + +## 9.10.5 (2024-02-01) + +* [bitnami/etcd] Release 9.10.5 updating components versions (#23001) ([3adf43c](https://github.com/bitnami/charts/commit/3adf43cf45e27f11495af497635c4dccba7bfafb)), closes [#23001](https://github.com/bitnami/charts/issues/23001) + +## 9.10.4 (2024-01-30) + +* [bitnami/etcd] Release 9.10.4 updating components versions (#22859) ([a016f58](https://github.com/bitnami/charts/commit/a016f58168d2842f9d14acaa1c650f904d9a5aa9)), closes [#22859](https://github.com/bitnami/charts/issues/22859) + +## 9.10.3 (2024-01-27) + +* [bitnami/*] Move documentation sections from docs.bitnami.com back to the README (#22203) ([7564f36](https://github.com/bitnami/charts/commit/7564f36ca1e95ff30ee686652b7ab8690561a707)), closes [#22203](https://github.com/bitnami/charts/issues/22203) +* [bitnami/etcd] Release 9.10.3 updating components versions (#22774) ([51dfe9c](https://github.com/bitnami/charts/commit/51dfe9cdf0833bc34017fc30b6c39b42c9e4d136)), closes [#22774](https://github.com/bitnami/charts/issues/22774) + +## 9.10.2 (2024-01-24) + +* [bitnami/etcd] fix: :bug: Set seLinuxOptions to null for Openshift compatibility (#22584) ([d77f13b](https://github.com/bitnami/charts/commit/d77f13b8bdb04215b04a6d5b9edcba00a0203af9)), closes [#22584](https://github.com/bitnami/charts/issues/22584) + +## 
9.10.1 (2024-01-22) + +* [bitnami/etcd] Release 9.10.1 updating components versions (#22542) ([6303449](https://github.com/bitnami/charts/commit/630344996edbd183b1accdef022ff513946de0f7)), closes [#22542](https://github.com/bitnami/charts/issues/22542) + +## 9.10.0 (2024-01-19) + +* [bitnami/etcd] fix: :lock: Move service-account token auto-mount to pod declaration (#22397) ([0bd8132](https://github.com/bitnami/charts/commit/0bd8132d8b93dc0b24aa6c6c01cafd594b81847f)), closes [#22397](https://github.com/bitnami/charts/issues/22397) + +## 9.9.1 (2024-01-18) + +* [bitnami/etcd] Release 9.9.1 updating components versions (#22272) ([0fcd1b8](https://github.com/bitnami/charts/commit/0fcd1b858c9d8ab4ecc5afb7b750cc57e9c4daea)), closes [#22272](https://github.com/bitnami/charts/issues/22272) + +## 9.9.0 (2024-01-16) + +* [bitnami/etcd] fix: :lock: Improve podSecurityContext and containerSecurityContext with essential se ([06f7d1b](https://github.com/bitnami/charts/commit/06f7d1b0a000dda237a5df92ec77d1360a74207d)), closes [#22115](https://github.com/bitnami/charts/issues/22115) + +## 9.8.2 (2024-01-15) + +* [bitnami/*] Fix ref links (in comments) (#21822) ([e4fa296](https://github.com/bitnami/charts/commit/e4fa296106b225cf8c82445727c675c7c725e380)), closes [#21822](https://github.com/bitnami/charts/issues/21822) +* [bitnami/etcd] fix: :lock: Do not use the default service account (#22008) ([8749ab2](https://github.com/bitnami/charts/commit/8749ab24e5fe7a74c301604f8738d4ff68aa4150)), closes [#22008](https://github.com/bitnami/charts/issues/22008) + +## 9.8.1 (2024-01-10) + +* [bitnami/*] Fix docs.bitnami.com broken links (#21901) ([f35506d](https://github.com/bitnami/charts/commit/f35506d2dadee4f097986e7792df1f53ab215b5d)), closes [#21901](https://github.com/bitnami/charts/issues/21901) +* [bitnami/*] Update copyright: Year and company (#21815) ([6c4bf75](https://github.com/bitnami/charts/commit/6c4bf75dec58fc7c9aee9f089777b1a858c17d5b)), closes 
[#21815](https://github.com/bitnami/charts/issues/21815) +* [bitnami/etcd] Release 9.8.1 updating components versions (#21934) ([a282aa6](https://github.com/bitnami/charts/commit/a282aa647063912abad9a2c6387c65d27fe2cb61)), closes [#21934](https://github.com/bitnami/charts/issues/21934) + +## 9.8.0 (2023-12-21) + +* Allow Configuration of ServiceAccount for CronJob Pods (#21512) ([c7962bd](https://github.com/bitnami/charts/commit/c7962bd4c44f02a62242ef4b28761b7edbd62c9b)), closes [#21512](https://github.com/bitnami/charts/issues/21512) + +## 9.7.7 (2023-12-19) + +* [bitnami/etcd] Release 9.7.7 updating components versions (#21652) ([cf772d4](https://github.com/bitnami/charts/commit/cf772d4b21f1d2e5784138526f9e31fca563e819)), closes [#21652](https://github.com/bitnami/charts/issues/21652) + +## 9.7.6 (2023-12-17) + +* [bitnami/etcd] Release 9.7.6 updating components versions (#21605) ([71081da](https://github.com/bitnami/charts/commit/71081dafac7dadc1fdb8223c8b238fa5167d2689)), closes [#21605](https://github.com/bitnami/charts/issues/21605) + +## 9.7.5 (2023-12-07) + +* [bitnami/etcd] Release 9.7.5 updating components versions (#21467) ([a09047c](https://github.com/bitnami/charts/commit/a09047cb5971fb6a908519b86e85c6667b8892f0)), closes [#21467](https://github.com/bitnami/charts/issues/21467) + +## 9.7.4 (2023-12-06) + +* [bitnami/etcd] Release 9.7.4 updating components versions (#21427) ([d972395](https://github.com/bitnami/charts/commit/d972395da57595193796652c84d5e70899db9039)), closes [#21427](https://github.com/bitnami/charts/issues/21427) + +## 9.7.3 (2023-11-24) + +* [bitnami/etcd] Fix podLabels for disasterrecovery cronjob (#21199) ([bd928ee](https://github.com/bitnami/charts/commit/bd928eef2b7346c2f07ea4190ae092e73dfbd33f)), closes [#21199](https://github.com/bitnami/charts/issues/21199) + +## 9.7.2 (2023-11-21) + +* [bitnami/*] Rename solutions to "Bitnami package for ..." 
(#21038) ([b82f979](https://github.com/bitnami/charts/commit/b82f979e4fb63423fe6e2192c946d09d79c944fc)), closes [#21038](https://github.com/bitnami/charts/issues/21038) +* [bitnami/etcd] Release 9.7.2 updating components versions (#21113) ([78a07dc](https://github.com/bitnami/charts/commit/78a07dc1c1e75fda1f614cc7200266bafe1e6004)), closes [#21113](https://github.com/bitnami/charts/issues/21113) + +## 9.7.1 (2023-11-20) + +* [bitnami/etcd] Release 9.7.1 updating components versions (#21061) ([371f22a](https://github.com/bitnami/charts/commit/371f22ad8fbc644079450c52a667495f33eb8e9a)), closes [#21061](https://github.com/bitnami/charts/issues/21061) + +## 9.7.0 (2023-11-17) + +* [bitnami/*] Remove relative links to non-README sections, add verification for that and update TL;DR ([1103633](https://github.com/bitnami/charts/commit/11036334d82df0490aa4abdb591543cab6cf7d7f)), closes [#20967](https://github.com/bitnami/charts/issues/20967) +* [bitnami/etcd] Add option to add extra labels only for snapshot cronjob (#21025) ([a0a38de](https://github.com/bitnami/charts/commit/a0a38deac2d1a4c5f83829f768b37af991d66efb)), closes [#21025](https://github.com/bitnami/charts/issues/21025) + +## 9.6.2 (2023-11-08) + +* [bitnami/etcd] Release 9.6.2 updating components versions (#20803) ([d9b6734](https://github.com/bitnami/charts/commit/d9b67347192a707fa40df9835f803225a4f6d4f9)), closes [#20803](https://github.com/bitnami/charts/issues/20803) + +## 9.6.1 (2023-11-08) + +* [bitnami/etcd] Release 9.6.1 updating components versions (#20727) ([b856c6b](https://github.com/bitnami/charts/commit/b856c6b32e0ddc700e02cc91f6de71d2f9d25a1d)), closes [#20727](https://github.com/bitnami/charts/issues/20727) + +## 9.6.0 (2023-10-31) + +* [bitnami/etcd] feat: :sparkles: Add support for PSA restricted policy (#20425) ([fbdc5f1](https://github.com/bitnami/charts/commit/fbdc5f182cec0bb77485c31a78c907c2d27e7142)), closes [#20425](https://github.com/bitnami/charts/issues/20425) + +## 9.5.7 (2023-10-27) 
+ +* [bitnami/*] Rename VMware Application Catalog (#20361) ([3acc734](https://github.com/bitnami/charts/commit/3acc73472beb6fb56c4d99f929061001205bc57e)), closes [#20361](https://github.com/bitnami/charts/issues/20361) +* [bitnami/*] Skip image's tag in the README files of the Bitnami Charts (#19841) ([bb9a01b](https://github.com/bitnami/charts/commit/bb9a01b65911c87e48318db922cc05eb42785e42)), closes [#19841](https://github.com/bitnami/charts/issues/19841) +* [bitnami/*] Standardize documentation (#19835) ([af5f753](https://github.com/bitnami/charts/commit/af5f7530c1bc8c5ded53a6c4f7b8f384ac1804f2)), closes [#19835](https://github.com/bitnami/charts/issues/19835) +* [bitnami/etcd] Release 9.5.7 updating components versions (#20487) ([b27292c](https://github.com/bitnami/charts/commit/b27292c29f0b0459ced7f232b268380b935f548b)), closes [#20487](https://github.com/bitnami/charts/issues/20487) + +## 9.5.6 (2023-10-12) + +* [bitnami/etcd] Release 9.5.6 (#20130) ([ac7cf62](https://github.com/bitnami/charts/commit/ac7cf62bfb2aadb57d39b89683be9cc4eaf9b2a4)), closes [#20130](https://github.com/bitnami/charts/issues/20130) + +## 9.5.5 (2023-10-11) + +* [bitnami/etcd] Release 9.5.5 (#20037) ([83505b6](https://github.com/bitnami/charts/commit/83505b6adc6eb456fba11867c67a5b92183b3b8a)), closes [#20037](https://github.com/bitnami/charts/issues/20037) + +## 9.5.4 (2023-10-09) + +* [bitnami/etcd] Release 9.5.4 (#19907) ([036b22a](https://github.com/bitnami/charts/commit/036b22aa10eacd289c3ac3bbd1b8465f0232fde9)), closes [#19907](https://github.com/bitnami/charts/issues/19907) + +## 9.5.3 (2023-10-09) + +* [bitnami/etcd] Release 9.5.3 (#19821) ([80c4b3c](https://github.com/bitnami/charts/commit/80c4b3c922a05e63579c0e6ee006f7bfa286693e)), closes [#19821](https://github.com/bitnami/charts/issues/19821) + +## 9.5.2 (2023-10-09) + +* [bitnami/*] Update Helm charts prerequisites (#19745) ([eb755dd](https://github.com/bitnami/charts/commit/eb755dd36a4dd3cf6635be8e0598f9a7f4c4a554)), 
closes [#19745](https://github.com/bitnami/charts/issues/19745) +* [bitnami/etcd] bump bitnami/common (#19785) ([1d4a5c2](https://github.com/bitnami/charts/commit/1d4a5c2c47de57717238a3d79e6ef6303bc608fd)), closes [#19785](https://github.com/bitnami/charts/issues/19785) + +## 9.5.1 (2023-10-03) + +* [bitnami/etcd] Release 9.5.1 (#19703) ([71914d8](https://github.com/bitnami/charts/commit/71914d80a844c372298e27d39480fb9c9a99190f)), closes [#19703](https://github.com/bitnami/charts/issues/19703) +* Autogenerate schema files (#19194) ([a2c2090](https://github.com/bitnami/charts/commit/a2c2090b5ac97f47b745c8028c6452bf99739772)), closes [#19194](https://github.com/bitnami/charts/issues/19194) +* Revert "Autogenerate schema files (#19194)" (#19335) ([73d80be](https://github.com/bitnami/charts/commit/73d80be525c88fb4b8a54451a55acd506e337062)), closes [#19194](https://github.com/bitnami/charts/issues/19194) [#19335](https://github.com/bitnami/charts/issues/19335) + +## 9.5.0 (2023-09-11) + +* [bitnami/etcd] Adding seccompProfile in etcd (#19015) ([2e53be9](https://github.com/bitnami/charts/commit/2e53be9ba6263d3949c132c7a000b124f6e3ce17)), closes [#19015](https://github.com/bitnami/charts/issues/19015) + +## 9.4.3 (2023-09-08) + +* [bitnami/etcd: Use merge helper]: (#19035) ([a95e00a](https://github.com/bitnami/charts/commit/a95e00a8887d2a464f0d46741524d7a72f5014aa)), closes [#19035](https://github.com/bitnami/charts/issues/19035) + +## 9.4.2 (2023-09-06) + +* [bitnami/etcd] Release 9.4.2 (#19146) ([ce81ac7](https://github.com/bitnami/charts/commit/ce81ac772f73ec8be10a3abc7ec7c94cbc566903)), closes [#19146](https://github.com/bitnami/charts/issues/19146) + +## 9.4.1 (2023-08-29) + +* [bitnami/etcd] test: :recycle: Use ginkgo-utils in persistence test (#18918) ([1f73d2b](https://github.com/bitnami/charts/commit/1f73d2b09cd012c44912013bc2997b7ddf6131fa)), closes [#18918](https://github.com/bitnami/charts/issues/18918) + +## 9.4.0 (2023-08-25) + +* [bitnami/etcd] Add 
ETCD_LISTEN_METRICS_URLS (#18452) ([b7889dd](https://github.com/bitnami/charts/commit/b7889dd3de5fe8bd00c805b98bf2a75795781ac1)), closes [#18452](https://github.com/bitnami/charts/issues/18452) + +## 9.3.0 (2023-08-22) + +* [bitnami/etcd] Support for customizing standard labels (#18298) ([894eebb](https://github.com/bitnami/charts/commit/894eebba0e11265990b636fb3c161d1297f287fa)), closes [#18298](https://github.com/bitnami/charts/issues/18298) + +## 9.2.2 (2023-08-19) + +* [bitnami/etcd] Release 9.2.2 (#18657) ([0ac424c](https://github.com/bitnami/charts/commit/0ac424ce4c4710a5a39b6094c36fd033714c4e0e)), closes [#18657](https://github.com/bitnami/charts/issues/18657) + +## 9.2.1 (2023-08-17) + +* [bitnami/etcd] Release 9.2.1 (#18513) ([ad89f0f](https://github.com/bitnami/charts/commit/ad89f0f996501b92747cd2f93d5ad5a076ca890a)), closes [#18513](https://github.com/bitnami/charts/issues/18513) + +## 9.2.0 (2023-08-09) + +* [bitnami/etcd] only add the service to advertised addresses when it is enabled (#18211) ([ece67e0](https://github.com/bitnami/charts/commit/ece67e02c03466c44a5185d0d6f22f26844e7cdf)), closes [#18211](https://github.com/bitnami/charts/issues/18211) + +## 9.1.0 (2023-08-01) + +* [bitnami/etcd] Add initialClusterToken (#17802) ([443e30c](https://github.com/bitnami/charts/commit/443e30cd350100a00578e43f136ff96b3ee1899c)), closes [#17802](https://github.com/bitnami/charts/issues/17802) + +## 9.0.7 (2023-07-25) + +* [bitnami/etcd] Release 9.0.7 (#17884) ([9378270](https://github.com/bitnami/charts/commit/93782708afd4e1ee5498438ed14442e15096c7a0)), closes [#17884](https://github.com/bitnami/charts/issues/17884) + +## 9.0.6 (2023-07-19) + +* [bitnami/etcd] Add feature extraVolumeClaimTemplate (#17763) ([eaad838](https://github.com/bitnami/charts/commit/eaad8384150f0bdaa9203d56321975ef284acc81)), closes [#17763](https://github.com/bitnami/charts/issues/17763) + +## 9.0.5 (2023-07-15) + +* [bitnami/etcd] Release 9 (#17591) 
([154de6c](https://github.com/bitnami/charts/commit/154de6c03ec1725a5d7493292b9d66996d6c7e47)), closes [#17591](https://github.com/bitnami/charts/issues/17591) + +## 9.0.4 (2023-06-30) + +* [bitnami/etcd] Release 9.0.4 (#17437) ([6010dfa](https://github.com/bitnami/charts/commit/6010dfafae68ad9e272585c36da4f7d579e3a4a0)), closes [#17437](https://github.com/bitnami/charts/issues/17437) + +## 9.0.3 (2023-06-30) + +* [bitnami/etcd] Release 9.0.3 (#17423) ([78cb8ce](https://github.com/bitnami/charts/commit/78cb8ce2d01ae16090074d045ff93a647d72fa7f)), closes [#17423](https://github.com/bitnami/charts/issues/17423) + +## 9.0.2 (2023-06-29) + +* [bitnami/etcd] Release 9.0.2 (#17405) ([475363d](https://github.com/bitnami/charts/commit/475363d4458a275d71fd1970dcde6257191bc293)), closes [#17405](https://github.com/bitnami/charts/issues/17405) +* Add copyright header (#17300) ([da68be8](https://github.com/bitnami/charts/commit/da68be8e951225133c7dfb572d5101ca3d61c5ae)), closes [#17300](https://github.com/bitnami/charts/issues/17300) +* Update charts readme (#17217) ([31b3c0a](https://github.com/bitnami/charts/commit/31b3c0afd968ff4429107e34101f7509e6a0e913)), closes [#17217](https://github.com/bitnami/charts/issues/17217) + +## 9.0.1 (2023-06-20) + +* [bitnami/etcd] Release 9.0.1 (#17204) ([4f9edd0](https://github.com/bitnami/charts/commit/4f9edd03cbfdf7854a8c113f08e3b76e3ca86134)), closes [#17204](https://github.com/bitnami/charts/issues/17204) + +## 9.0.0 (2023-06-16) + +* [bitnami/*] Change copyright section in READMEs (#17006) ([ef986a1](https://github.com/bitnami/charts/commit/ef986a1605241102b3dcafe9fd8089e6fc1201ad)), closes [#17006](https://github.com/bitnami/charts/issues/17006) +* [bitnami/etcd] Fix label selector for pdb (#17001) ([6aa39d8](https://github.com/bitnami/charts/commit/6aa39d84cbe4cb887123d290298649dcca9e47a9)), closes [#17001](https://github.com/bitnami/charts/issues/17001) +* [bitnami/several] Change copyright section in READMEs (#16989) 
([5b6a5cf](https://github.com/bitnami/charts/commit/5b6a5cfb7625a751848a2e5cd796bd7278f406ca)), closes [#16989](https://github.com/bitnami/charts/issues/16989) + +## 8.12.0 (2023-06-01) + +* [bitnami/etcd] Fix subpath for snapshots (#16714) ([0db19a3](https://github.com/bitnami/charts/commit/0db19a3d23d74a3e45e2f38302c3367e851ea5c8)), closes [#16714](https://github.com/bitnami/charts/issues/16714) + +## 8.11.4 (2023-05-21) + +* [bitnami/etcd] Release 8.11.4 (#16761) ([d8959dd](https://github.com/bitnami/charts/commit/d8959dd9b5d2df2b97fb749d1d06c2bf15c29bbd)), closes [#16761](https://github.com/bitnami/charts/issues/16761) + +## 8.11.3 (2023-05-17) + +* [bitnami/etcd] Fix `service.clusterIP` only works when `service.type` is `ClusterIP` (#16390) ([9f2a5a8](https://github.com/bitnami/charts/commit/9f2a5a809b9b97c8519792d17f9a9ba28fd4ed3f)), closes [#16390](https://github.com/bitnami/charts/issues/16390) + +## 8.11.2 (2023-05-11) + +* [bitnami/etcd] Release 8.11.2 (#16592) ([d3340f7](https://github.com/bitnami/charts/commit/d3340f7c5866456fa52c7d53b040e666424de43b)), closes [#16592](https://github.com/bitnami/charts/issues/16592) +* Add wording for enterprise page (#16560) ([8f22774](https://github.com/bitnami/charts/commit/8f2277440b976d52785ba9149762ad8620a73d1f)), closes [#16560](https://github.com/bitnami/charts/issues/16560) + +## 8.11.1 (2023-05-10) + +* Update with a commnet the values.yaml (#16548) ([ee32086](https://github.com/bitnami/charts/commit/ee32086d52bf3fde698305e3f891362a4e1c4578)), closes [#16548](https://github.com/bitnami/charts/issues/16548) + +## 8.11.0 (2023-05-09) + +* [bitnami/several] Adapt Chart.yaml to set desired OCI annotations (#16546) ([fc9b18f](https://github.com/bitnami/charts/commit/fc9b18f2e98805d4df629acbcde696f44f973344)), closes [#16546](https://github.com/bitnami/charts/issues/16546) + +## 8.10.2 (2023-05-09) + +* [bitnami/etcd] Release 8.10.2 (#16453) 
([157e8be](https://github.com/bitnami/charts/commit/157e8be3b52d550f168ab8a5de98c48fb6d125e8)), closes [#16453](https://github.com/bitnami/charts/issues/16453) + +## 8.10.1 (2023-04-24) + +* [bitnami/etcd] Release 8.10.1 (#16203) ([f28776d](https://github.com/bitnami/charts/commit/f28776d0d2895ce9872c4b996cec6c159f409952)), closes [#16203](https://github.com/bitnami/charts/issues/16203) + +## 8.10.0 (2023-04-20) + +* [bitnami/*] Make Helm charts 100% OCI (#15998) ([8841510](https://github.com/bitnami/charts/commit/884151035efcbf2e1b3206e7def85511073fb57d)), closes [#15998](https://github.com/bitnami/charts/issues/15998) + +## 8.9.0 (2023-04-17) + +* [bitnami/etcd] Add support for dynamic snapshot dir in snapshotter cronjob (#16023) ([7726f63](https://github.com/bitnami/charts/commit/7726f63b2e11f4889e3b06ba07708fa252939ce1)), closes [#16023](https://github.com/bitnami/charts/issues/16023) + +## 8.8.3 (2023-04-13) + +* [bitnami/etcd] Release 8.8.3 (#16047) ([a522a96](https://github.com/bitnami/charts/commit/a522a96257b970671548181049ac764189990194)), closes [#16047](https://github.com/bitnami/charts/issues/16047) + +## 8.8.2 (2023-04-05) + +* Use common.labels.matchLabels instead of common.labels.standard in networkpolicy matchLabels (#15950 ([3874554](https://github.com/bitnami/charts/commit/3874554828440331d719eec4397cd191a3f8fa7b)), closes [#15950](https://github.com/bitnami/charts/issues/15950) + +## 8.8.1 (2023-03-31) + +* [bitnami/etcd] Release 8.8.1 (#15826) ([180c95c](https://github.com/bitnami/charts/commit/180c95cc3d6df6fe500f70f93f8911c8b333e1d7)), closes [#15826](https://github.com/bitnami/charts/issues/15826) + +## 8.8.0 (2023-03-10) + +* [bitnami/etcd] Add support for service.headless.annotations (#15427) ([b00169e](https://github.com/bitnami/charts/commit/b00169e16d2645ac2600c8f6df0ec6e2ed30c8fc)), closes [#15427](https://github.com/bitnami/charts/issues/15427) + +## 8.7.7 (2023-03-08) + +* [bitnami/charts] Apply linter to README files (#15357) 
([0e29e60](https://github.com/bitnami/charts/commit/0e29e600d3adc8b1b46e506eccb3decfab3b4e63)), closes [#15357](https://github.com/bitnami/charts/issues/15357) +* [bitnami/etcd] add PVC labels (#15350) ([562aee0](https://github.com/bitnami/charts/commit/562aee0122715b93adc384885244021cec7a3e95)), closes [#15350](https://github.com/bitnami/charts/issues/15350) + +## 8.7.6 (2023-03-01) + +* [bitnami/etcd] Release 8.7.6 (#15196) ([d8f63d4](https://github.com/bitnami/charts/commit/d8f63d45e8754c0d330e9075f8db22d0b5cdd7ba)), closes [#15196](https://github.com/bitnami/charts/issues/15196) +* Fixes dead links (#15065) ([9e99fd9](https://github.com/bitnami/charts/commit/9e99fd94d89605c2aa47417ae80eecb86719d904)), closes [#15065](https://github.com/bitnami/charts/issues/15065) + +## 8.7.5 (2023-02-17) + +* [bitnami/*] Fix markdown linter issues 2 (#14890) ([aa96572](https://github.com/bitnami/charts/commit/aa9657237ee8df4a46db0d7fdf8a23230dd6902a)), closes [#14890](https://github.com/bitnami/charts/issues/14890) +* [bitnami/etcd] Release 8.7.5 (#14949) ([1f6d3bc](https://github.com/bitnami/charts/commit/1f6d3bcfddd144e96d1be38fbaf022b4afe65f90)), closes [#14949](https://github.com/bitnami/charts/issues/14949) + +## 8.7.4 (2023-02-14) + +* [bitnami/*] Change copyright date (#14682) ([add4ec7](https://github.com/bitnami/charts/commit/add4ec701108ac36ed4de2dffbdf407a0d091067)), closes [#14682](https://github.com/bitnami/charts/issues/14682) +* [bitnami/*] Fix markdown linter issues (#14874) ([a51e0e8](https://github.com/bitnami/charts/commit/a51e0e8d35495b907f3e70dd2f8e7c3bcbf4166a)), closes [#14874](https://github.com/bitnami/charts/issues/14874) +* [bitnami/*] Unify READMEs (#14472) ([2064fb8](https://github.com/bitnami/charts/commit/2064fb8dcc78a845cdede8211af8c3cc52551161)), closes [#14472](https://github.com/bitnami/charts/issues/14472) +* [bitnami/etcd] Release 8.7.4 (#14883) ([e0e5a39](https://github.com/bitnami/charts/commit/e0e5a39fcb0bd45aebb0a2d1de083839fdcae18e)), 
closes [#14883](https://github.com/bitnami/charts/issues/14883) + +## 8.7.3 (2023-01-20) + +* [bitnami/etcd] Release 8.7.3 (#14476) ([1afa79c](https://github.com/bitnami/charts/commit/1afa79c20c4dc6906ac388e896484d42b797be62)), closes [#14476](https://github.com/bitnami/charts/issues/14476) + +## 8.7.2 (2023-01-18) + +* [bitnami/*] Change licenses annotation format (#14377) ([0ab7608](https://github.com/bitnami/charts/commit/0ab760862c660fcc78cffadf8e1d8cdd70881473)), closes [#14377](https://github.com/bitnami/charts/issues/14377) +* [bitnami/etcd] Release 8.7.2 (#14430) ([1aacd78](https://github.com/bitnami/charts/commit/1aacd7884ed8dee3a1ad84a5df026dd1cf04255a)), closes [#14430](https://github.com/bitnami/charts/issues/14430) + +## 8.7.1 (2023-01-16) + +* Fix an issue where the etcd chart renders the jwt secret when the auth token is disabled (#14364) ([0b18941](https://github.com/bitnami/charts/commit/0b189417ed2725c9c33918103215025494966313)), closes [#14364](https://github.com/bitnami/charts/issues/14364) + +## 8.7.0 (2023-01-13) + +* [bitnami/*] Add license annotation and remove obsolete engine parameter (#14293) ([da2a794](https://github.com/bitnami/charts/commit/da2a7943bae95b6e9b5b4ed972c15e990b69fdb0)), closes [#14293](https://github.com/bitnami/charts/issues/14293) +* [bitnami/etcd] Add param auth.token.enabled to disable etcd auth (#14273) ([210f917](https://github.com/bitnami/charts/commit/210f9175798832ae1b4ffbba6b5a40cd6ec10ba3)), closes [#14273](https://github.com/bitnami/charts/issues/14273) + +## 8.6.0 (2023-01-09) + +* [bitnami/etcd] Add support for shareProcessNamespace (#14018) ([0c4ed33](https://github.com/bitnami/charts/commit/0c4ed330e20726226e6dc1fb4371870c4f93b027)), closes [#14018](https://github.com/bitnami/charts/issues/14018) + +## 8.5.11 (2022-12-19) + +* [bitnami/etcd] Release 8.5.11 (#14027) ([2ae2f7c](https://github.com/bitnami/charts/commit/2ae2f7cf0c4198db02f118e3f004fb877bd20443)), closes 
[#14027](https://github.com/bitnami/charts/issues/14027) + +## 8.5.10 (2022-11-21) + +* [bitnami/etcd] Release 8.5.10 (#13620) ([f014842](https://github.com/bitnami/charts/commit/f0148427d042a2823e665daef0ceb9230a09e0cb)), closes [#13620](https://github.com/bitnami/charts/issues/13620) + +## 8.5.9 (2022-11-20) + +* [bitnami/etcd] Release 8.5.9 (#13609) ([52974e4](https://github.com/bitnami/charts/commit/52974e48db346800358be75f5cb3cb6f46ec68b7)), closes [#13609](https://github.com/bitnami/charts/issues/13609) + +## 8.5.8 (2022-10-21) + +* [bitnami/etcd] Release 8.5.8 (#13076) ([1ea11eb](https://github.com/bitnami/charts/commit/1ea11eb4ac9d5a9921ae4db99b85a0c6dce81cef)), closes [#13076](https://github.com/bitnami/charts/issues/13076) + +## 8.5.7 (2022-10-19) + +* [bitnami/*] Use new default branch name in links (#12943) ([a529e02](https://github.com/bitnami/charts/commit/a529e02597d49d944eba1eb0f190713293247176)), closes [#12943](https://github.com/bitnami/charts/issues/12943) +* [bitnami/etcd] Release 8.5.7 (#13028) ([7f24872](https://github.com/bitnami/charts/commit/7f248725ffa1efaff7b4eb15aada0e9956644ee0)), closes [#13028](https://github.com/bitnami/charts/issues/13028) + +## 8.5.6 (2022-10-14) + +* adjust the naming style for resources to make them not pass the length limit (#12955) ([523d82d](https://github.com/bitnami/charts/commit/523d82d90e86681a46f6d195819d27842fe6d13b)), closes [#12955](https://github.com/bitnami/charts/issues/12955) +* Generic README instructions related to the repo (#12792) ([3cf6b10](https://github.com/bitnami/charts/commit/3cf6b10e10e60df4b3e191d6b99aa99a9f597755)), closes [#12792](https://github.com/bitnami/charts/issues/12792) + +## 8.5.5 (2022-09-22) + +* [bitnami/etcd] Use custom probes if given (#12494) ([353c06e](https://github.com/bitnami/charts/commit/353c06e8d96414cdc005bbd0e0e86712c7e362d8)), closes [#12494](https://github.com/bitnami/charts/issues/12494) [#12354](https://github.com/bitnami/charts/issues/12354) + +## 8.5.4 
(2022-09-20) + +* [bitnami/etcd] Make ETCD_LOG_LEVEL a parameter (#12347) ([d775261](https://github.com/bitnami/charts/commit/d7752613c71864766975000fe43f3170777d5e41)), closes [#12347](https://github.com/bitnami/charts/issues/12347) + +## 8.5.3 (2022-09-19) + +* [bitnami/etcd] Fix API Version for CronJob of etcd (#12449) ([fdc4d7d](https://github.com/bitnami/charts/commit/fdc4d7d9309ff761bd36d46459d1794946fc7b7f)), closes [#12449](https://github.com/bitnami/charts/issues/12449) +* [bitnami/etcd] Release 8.5.3 (#12575) ([ef65a30](https://github.com/bitnami/charts/commit/ef65a305ec3a1a429d6fe7435ae1c97c6f4224ad)), closes [#12575](https://github.com/bitnami/charts/issues/12575) + +## 8.5.2 (2022-09-16) + +* [bitnami/etcd] Reuse etcd jwt token while upgrade (#12019) ([b29606f](https://github.com/bitnami/charts/commit/b29606f6208aaee5a4766bd65d286b23a56f327b)), closes [#12019](https://github.com/bitnami/charts/issues/12019) + +## 8.5.1 (2022-09-15) + +* [bitnami/etcd] Release 8.5.1 (#12445) ([4c2d100](https://github.com/bitnami/charts/commit/4c2d1007f87b354b2a7368c260a2744314585fde)), closes [#12445](https://github.com/bitnami/charts/issues/12445) + +## 8.5.0 (2022-09-14) + +* [bitnami/etcd] Disallowing privilege escalation for etcd (#12311) ([a448711](https://github.com/bitnami/charts/commit/a44871102a05676eff7a75fccc27755b298deed9)), closes [#12311](https://github.com/bitnami/charts/issues/12311) + +## 8.4.5 (2022-09-05) + +* [bitnami/etcd] Bump chart version ([ae3a7d2](https://github.com/bitnami/charts/commit/ae3a7d23ccdf9ae61a4d744dea6eb1cad571d76e)) +* [bitnami/etcd] Fix pvc deletion when trying to restore from snapshot (#12261) ([ff48cf4](https://github.com/bitnami/charts/commit/ff48cf487d3d2cf734d5ac95cef83ee1299143d8)), closes [#12261](https://github.com/bitnami/charts/issues/12261) + +## 8.4.4 (2022-09-03) + +* [bitnami/etcd] Release 8.4.4 (#12269) ([758faa1](https://github.com/bitnami/charts/commit/758faa15d807c95cdca2af77bbd4bfa55e82cf4c)), closes 
[#12269](https://github.com/bitnami/charts/issues/12269) + +## 8.4.3 (2022-08-23) + +* [bitnami/etcd] Update Chart.lock (#12032) ([36f82a9](https://github.com/bitnami/charts/commit/36f82a995efc712510a981247afe8173307d3f20)), closes [#12032](https://github.com/bitnami/charts/issues/12032) + +## 8.4.2 (2022-08-23) + +* [bitnami/etcd] Bump bitnami/common major version (#12010) ([08d9e4f](https://github.com/bitnami/charts/commit/08d9e4f95334a3dd64640edfc596b8e4d5c52c3a)), closes [#12010](https://github.com/bitnami/charts/issues/12010) + +## 8.4.1 (2022-08-22) + +* [bitnami/etcd] Update Chart.lock (#11993) ([c69a029](https://github.com/bitnami/charts/commit/c69a0296eed2f63089a7b79a63342e35c9be6235)), closes [#11993](https://github.com/bitnami/charts/issues/11993) + +## 8.4.0 (2022-08-18) + +* [bitnami/etcd] Add support for image digest apart from tag (#11798) ([30a4ec9](https://github.com/bitnami/charts/commit/30a4ec9a42f0f4c1f54fb19e0fe7f5d1a0b7134a)), closes [#11798](https://github.com/bitnami/charts/issues/11798) + +## 8.3.8 (2022-08-09) + +* [binami/etcd] Fix command for cronjob when diagnostic and disaster recovery are enabled (#11637) ([a6a7812](https://github.com/bitnami/charts/commit/a6a7812472552b58b78bdf74feafb64afffcc5b7)), closes [#11637](https://github.com/bitnami/charts/issues/11637) + +## 8.3.7 (2022-08-04) + +* [bitnami/etcd] Release 8.3.7 updating components versions ([92e7250](https://github.com/bitnami/charts/commit/92e72507d361e74606c9461aaf82994e013c326e)) + +## 8.3.6 (2022-08-03) + +* [bitnami/etcd] Release 8.3.6 updating components versions ([a0eeae1](https://github.com/bitnami/charts/commit/a0eeae13185c737ed82e7c0310158718c186f61d)) + +## 8.3.5 (2022-08-02) + +* [bitnami/*] Update URLs to point to the new bitnami/containers monorepo (#11352) ([d665af0](https://github.com/bitnami/charts/commit/d665af0c708846192d8d5fb2f5f9ea65dd464ab0)), closes [#11352](https://github.com/bitnami/charts/issues/11352) +* [bitnami/etcd] Release 8.3.5 updating 
components versions ([d29c674](https://github.com/bitnami/charts/commit/d29c674ff4632c77c1d7acbcfdea3614a931810a)) + +## 8.3.4 (2022-07-14) + +* [bitnami/etcd] Release 8.3.4 updating components versions ([430394a](https://github.com/bitnami/charts/commit/430394a7972a92a183f7f19cb5db35d6d85c8cdb)) + +## 8.3.3 (2022-06-30) + +* [bitnami/etcd] Release 8.3.3 updating components versions ([5e3cacb](https://github.com/bitnami/charts/commit/5e3cacb0836695816c21d5456eb52be7523b5299)) + +## 8.3.2 (2022-06-25) + +* [bitnami/etcd] Release 8.3.2 updating components versions ([3f7c2c1](https://github.com/bitnami/charts/commit/3f7c2c1f9c1e25ad574e39d0d1b28ef4b2fa59f1)) + +## 8.3.1 (2022-06-24) + +* [bitnami/etcd] Release 8.3.1 updating components versions ([b005137](https://github.com/bitnami/charts/commit/b005137bdab4067ceae2a19a13d5cacb333ca29b)) + +## 8.3.0 (2022-06-13) + +* [bitnami/etcd] add runtime class support (#10709) ([d363117](https://github.com/bitnami/charts/commit/d36311748078c08e2ad5a8cc64b2c02007304636)), closes [#10709](https://github.com/bitnami/charts/issues/10709) + +## 8.2.6 (2022-06-10) + +* [bitnami/*] Replace Kubeapps URL in READMEs (and kubeapps Chart.yaml) and remove BKPR references (#1 ([c6a7914](https://github.com/bitnami/charts/commit/c6a7914361e5aea6016fb45bf4d621edfd111d32)), closes [#10600](https://github.com/bitnami/charts/issues/10600) +* [bitnami/etcd] Release 8.2.6 updating components versions ([e1639c0](https://github.com/bitnami/charts/commit/e1639c09b6e37f28978a06eed04b3e83bde1861a)) + +## 8.2.5 (2022-06-06) + +* [bitnami/etcd] Release 8.2.5 updating components versions ([1faaec1](https://github.com/bitnami/charts/commit/1faaec17c046a52747bd5eb0dc0b442757e9cc55)) + +## 8.2.4 (2022-06-01) + +* [bitnami/several] Replace maintainers email by url (#10523) ([ff3cf61](https://github.com/bitnami/charts/commit/ff3cf617a1680509b0f3855d17c4ccff7b29a0ff)), closes [#10523](https://github.com/bitnami/charts/issues/10523) + +## 8.2.3 (2022-05-30) + +* 
[bitnami/several] Replace base64 --decode with base64 -d (#10495) ([099286a](https://github.com/bitnami/charts/commit/099286ae7a87784cf809df0b64ab24f7ff0144c8)), closes [#10495](https://github.com/bitnami/charts/issues/10495) + +## 8.2.2 (2022-05-21) + +* [bitnami/etcd] Release 8.2.2 updating components versions ([100a61e](https://github.com/bitnami/charts/commit/100a61ed978bcae231c6804a816e2cd121471f50)) + +## 8.2.1 (2022-05-20) + +* [bitnami/etcd] Release 8.2.1 updating components versions ([bf89f82](https://github.com/bitnami/charts/commit/bf89f82041d7b65c1f5f40656e54822ad3992eaf)) + +## 8.2.0 (2022-05-19) + +* [bitnami/etcd] add prometheus rule (#10299) ([42de0f5](https://github.com/bitnami/charts/commit/42de0f503ae60418e7c475802eba7636fa9dc34c)), closes [#10299](https://github.com/bitnami/charts/issues/10299) + +## 8.1.3 (2022-05-19) + +* [bitnami/etcd] Release 8.1.3 updating components versions ([ddbf52d](https://github.com/bitnami/charts/commit/ddbf52d4fe97f1b458271bcc2333f633945cecf4)) + +## 8.1.2 (2022-05-18) + +* [bitnami/*] Remove old 'ci' files (#10171) ([5df30c4](https://github.com/bitnami/charts/commit/5df30c44dbd1812da8786579ce4a94917d46a6ad)), closes [#10171](https://github.com/bitnami/charts/issues/10171) +* [bitnami/etcd] Release 8.1.2 updating components versions ([f737e91](https://github.com/bitnami/charts/commit/f737e9113c1b442b3cb364e0f7d7bc159e700d98)) + +## 8.1.1 (2022-05-11) + +* [bitnami/etcd] added persistentVolumeClaimRetentionPolicy control to statefulset spec (#10058) ([a019f12](https://github.com/bitnami/charts/commit/a019f12c20b3ffb456779b129b009e81805c549e)), closes [#10058](https://github.com/bitnami/charts/issues/10058) + +## 8.1.0 (2022-04-29) + +* [bitnami/etcd] PodDisruptionBudget should be enabled by default (#9929) ([a4a0c16](https://github.com/bitnami/charts/commit/a4a0c163c3e3bb5201187f5ca3e255e185aea360)), closes [#9929](https://github.com/bitnami/charts/issues/9929) + +## 8.0.2 (2022-04-24) + +* [bitnami/etcd] Release 
8.0.2 updating components versions ([28d94d3](https://github.com/bitnami/charts/commit/28d94d39699a3e80cd4e854d21a8124ba951c2de)) + +## 8.0.1 (2022-04-20) + +* [bitnami/etcd] Release 8.0.1 updating components versions ([a792f42](https://github.com/bitnami/charts/commit/a792f4206191e43b850530ca13dd31f08a9227d5)) + +## 8.0.0 (2022-04-20) + +* [bitnami/etcd] Update etcd version (#9820) ([e2dec6b](https://github.com/bitnami/charts/commit/e2dec6b185b5a1c213b6acc4a06b6666b7d26fa5)), closes [#9820](https://github.com/bitnami/charts/issues/9820) + +## 7.0.6 (2022-04-20) + +* [bitnami/etcd] Release 7.0.6 updating components versions ([c04b33d](https://github.com/bitnami/charts/commit/c04b33d5cb73e8e8b637c3c58e5c7a6ea24ce385)) + +## 7.0.5 (2022-04-19) + +* [bitnami/etcd] Release 7.0.5 updating components versions ([2a662b8](https://github.com/bitnami/charts/commit/2a662b851b7cf3b713606a84f9406059647f6cd9)) + +## 7.0.4 (2022-04-13) + +* [bitnami/etcd] Release 7.0.4 updating components versions ([ac1a499](https://github.com/bitnami/charts/commit/ac1a4997443d56a782e8e32c403d21764c43fc9f)) + +## 7.0.3 (2022-04-13) + +* This fixes #9720 (#9735) ([1dc3acd](https://github.com/bitnami/charts/commit/1dc3acd3c5a88ee6fc95f6d204869ae85ccce23b)), closes [#9720](https://github.com/bitnami/charts/issues/9720) [#9735](https://github.com/bitnami/charts/issues/9735) + +## 7.0.2 (2022-04-06) + +* [bitnami/etcd]: fix: :bug: Use StatefulSet to calculate peers (#9708) ([54af93c](https://github.com/bitnami/charts/commit/54af93c1e6ad05585456e6741606362be3c3b20e)), closes [#9708](https://github.com/bitnami/charts/issues/9708) + +## 7.0.1 (2022-04-02) + +* [bitnami/etcd] Release 7.0.1 updating components versions ([a588d7f](https://github.com/bitnami/charts/commit/a588d7f2a6a8fffac86a72c3c927d034e740e87c)) + +## 7.0.0 (2022-03-30) + +* [bitnami/etcd] fix!: :arrow_down: Downgrade default etcd version to 3.4. 
(#9628) ([522c8f7](https://github.com/bitnami/charts/commit/522c8f7e00b23cd909ff30038ca79bfee405f8e1)), closes [#9628](https://github.com/bitnami/charts/issues/9628) + +## 6.13.9 (2022-03-28) + +* [bitnami/etcd] Release 6.13.9 updating components versions ([d6cc5b0](https://github.com/bitnami/charts/commit/d6cc5b037645360fb69bdd2f66439cbe96841346)) + +## 6.13.8 (2022-03-27) + +* [bitnami/etcd] Release 6.13.8 updating components versions ([f636c88](https://github.com/bitnami/charts/commit/f636c8872e2203eb3fff0c72469076634d54f489)) + +## 6.13.7 (2022-03-17) + +* [bitnami/etcd] Release 6.13.7 updating components versions ([586ec34](https://github.com/bitnami/charts/commit/586ec341f38a0d3d93aec177cc577614c729125b)) + +## 6.13.6 (2022-03-16) + +* [bitnami/etcd] Release 6.13.6 updating components versions ([5445e1a](https://github.com/bitnami/charts/commit/5445e1a4b0164dd118e936c526b4f63f9a391bf7)) + +## 6.13.5 (2022-02-27) + +* [bitnami/etcd] Release 6.13.5 updating components versions ([aaf7774](https://github.com/bitnami/charts/commit/aaf777427afd6f83836999a31781c0bd6b440c5c)) +* Update README.md ([d57e505](https://github.com/bitnami/charts/commit/d57e505587e71d058bdce88ab16ca608da138dfb)) + +## 6.13.4 (2022-02-21) + +* [bitnami/etcd] Do not hardcode PDB apiVersion (#9094) ([7f6a195](https://github.com/bitnami/charts/commit/7f6a1952d3c2a6fe80b1f71ed60c8d6afbbd147f)), closes [#9094](https://github.com/bitnami/charts/issues/9094) +* Non utf8 chars (#8923) ([6ffd18f](https://github.com/bitnami/charts/commit/6ffd18fbbdf10e94ea1a90cf5b84ef610ac2a72d)), closes [#8923](https://github.com/bitnami/charts/issues/8923) + +## 6.13.3 (2022-02-01) + +* [bitnami/etcd] Release 6.13.3 updating components versions ([f96b19c](https://github.com/bitnami/charts/commit/f96b19cfe4b0527d49fedc459e89e5c37e836168)) + +## 6.13.2 (2022-01-20) + +* [bitnami/*] Update READMEs (#8716) ([b9a9533](https://github.com/bitnami/charts/commit/b9a953337590eb2979453385874a267bacf50936)), closes 
[#8716](https://github.com/bitnami/charts/issues/8716) +* [bitnami/several] Change prerequisites (#8725) ([8d740c5](https://github.com/bitnami/charts/commit/8d740c566cfdb7e2d933c40128b4e919fce953a5)), closes [#8725](https://github.com/bitnami/charts/issues/8725) + +## 6.13.1 (2022-01-18) + +* [bitnami/etcd] Release 6.13.1 updating components versions ([76351d0](https://github.com/bitnami/charts/commit/76351d00decaa008cbf149fe1d626bba4bb9b48c)) + +## 6.13.0 (2022-01-18) + +* [bitnami/*] Readme automation (#8579) ([78d1938](https://github.com/bitnami/charts/commit/78d193831c900d178198491ffd08fa2217a64ecd)), closes [#8579](https://github.com/bitnami/charts/issues/8579) +* [bitnami/etcd] Chart standardised (#7926) ([c3f6f9a](https://github.com/bitnami/charts/commit/c3f6f9a4a6f5f81038a33245e198557ba4b19024)), closes [#7926](https://github.com/bitnami/charts/issues/7926) + +## 6.12.2 (2022-01-15) + +* [bitnami/etcd] Release 6.12.2 updating components versions ([d0bef1e](https://github.com/bitnami/charts/commit/d0bef1ec41ed5f4e987b744c1aec3c5e1308f182)) + +## 6.12.1 (2022-01-11) + +* [bitnami/etcd] Fix values metadata (#8611) ([1e29708](https://github.com/bitnami/charts/commit/1e297085133ba7f0422a0ff0acbc46049e1e905c)), closes [#8611](https://github.com/bitnami/charts/issues/8611) + +## 6.12.0 (2022-01-07) + +* [bitnami/etcd] Allow to override schedulerName (#8595) ([b116b7e](https://github.com/bitnami/charts/commit/b116b7e9a4472e4b9dec27be4f400eb18c1138e0)), closes [#8595](https://github.com/bitnami/charts/issues/8595) + +## 6.11.0 (2022-01-05) + +* [bitnami/several] Adapt templating format (#8562) ([8cad18a](https://github.com/bitnami/charts/commit/8cad18aed9966a6f0208e5ad6cee46cb217f47ab)), closes [#8562](https://github.com/bitnami/charts/issues/8562) + +## 6.10.8 (2022-01-04) + +* [bitnami/several] Add license to the README ([05f7633](https://github.com/bitnami/charts/commit/05f763372501d596e57db713dd53ff4ff3027cc4)) +* [bitnami/several] Add license to the README 
([32fb238](https://github.com/bitnami/charts/commit/32fb238e60a0affc6debd3142eaa3c3d9089ec2a)) +* [bitnami/several] Add license to the README ([b87c2f7](https://github.com/bitnami/charts/commit/b87c2f7899d48a8b02c506765e6ae82937e9ba3f)) +* [bitnami/several] Fix issue with quote when followed by }} (#8561) ([58077af](https://github.com/bitnami/charts/commit/58077af58f40e74e7af41485bcca493e38523537)), closes [#8561](https://github.com/bitnami/charts/issues/8561) + +## 6.10.7 (2022-01-03) + +* [bitnami/etcd] Change hardcoded password key with etcd.secretPasswordKey (#8442) ([d63c26c](https://github.com/bitnami/charts/commit/d63c26cff705339f54ab997b0dab721b43ed3e27)), closes [#8442](https://github.com/bitnami/charts/issues/8442) + +## 6.10.6 (2021-12-16) + +* [bitnami/etcd] Release 6.10.6 updating components versions ([28c022a](https://github.com/bitnami/charts/commit/28c022a6fc4b5e393c44e7e6764b8fed6284086d)) + +## 6.10.5 (2021-12-09) + +* [bitnami/cassandra,etcd,influxdb,metallb,mysql,postgresql,postgresql-ha,redis] Align networkpolicy ([0404b1a](https://github.com/bitnami/charts/commit/0404b1aa52a4514eee06143a7ce85307f16af6d3)), closes [#8336](https://github.com/bitnami/charts/issues/8336) + +## 6.10.4 (2021-11-29) + +* [bitnami/several] Fix deadlinks in README.md (#8215) ([99e90d2](https://github.com/bitnami/charts/commit/99e90d244b3244e059a42f72dcbecd3cda2b66bb)), closes [#8215](https://github.com/bitnami/charts/issues/8215) +* [bitnami/several] Regenerate README tables ([7b091c0](https://github.com/bitnami/charts/commit/7b091c0808ef00425c404cb97fe73c0578ccf1a3)) +* [bitnami/several] Replace HTTP by HTTPS when possible (#8259) ([eafb5bd](https://github.com/bitnami/charts/commit/eafb5bd5a2cc3aaf04fc1e8ebedd73f420d76864)), closes [#8259](https://github.com/bitnami/charts/issues/8259) + +## 6.10.3 (2021-11-19) + +* [bitnami/etcd] Fix etcd when using a custom svc port (#8189) 
([7fb8d7e](https://github.com/bitnami/charts/commit/7fb8d7e713ee15a5e32c88db93552749a9f29979)), closes [#8189](https://github.com/bitnami/charts/issues/8189) + +## 6.10.2 (2021-11-18) + +* [bitnami/etcd] Release 6.10.2 updating components versions ([e300a10](https://github.com/bitnami/charts/commit/e300a107785b052e7dd1aae50943ea549c1fc52c)) + +## 6.10.1 (2021-11-18) + +* [bitnami/etcd] ETCD_LISTEN_* to use container ports instead of svc ports (#8180) ([1ca4cbf](https://github.com/bitnami/charts/commit/1ca4cbf005a345c6e622fc76aff22285dfabeb11)), closes [#8180](https://github.com/bitnami/charts/issues/8180) + +## 6.10.0 (2021-11-04) + +* feat: add etcd nodeselctor & toleration on disasterRecovery (#7984) ([93ce746](https://github.com/bitnami/charts/commit/93ce746dceedeeafdff36edf49f672a52f040df8)), closes [#7984](https://github.com/bitnami/charts/issues/7984) +* [bitnami/several] Regenerate README tables ([412cf6a](https://github.com/bitnami/charts/commit/412cf6a513cb0c03444a6e7811c6f27193239a10)) + +## 6.9.4 (2021-10-27) + +* [bitnami/etcd] Release 6.9.4 updating components versions ([0e507a7](https://github.com/bitnami/charts/commit/0e507a7e806f16bcea14029f34daea6a8b5728ad)) + +## 6.9.3 (2021-10-22) + +* [bitnami/several] Add chart info to NOTES.txt (#7889) ([a6751cd](https://github.com/bitnami/charts/commit/a6751cdd33c461fabbc459fbea6f219ec64ab6b2)), closes [#7889](https://github.com/bitnami/charts/issues/7889) + +## 6.9.2 (2021-10-19) + +* [bitnami/several] Change pullPolicy for bitnami-shell image (#7852) ([9711a33](https://github.com/bitnami/charts/commit/9711a33c6eec72ea79143c4b7574dbe6a148d6b2)), closes [#7852](https://github.com/bitnami/charts/issues/7852) +* [bitnami/several] Regenerate README tables ([0a26eaa](https://github.com/bitnami/charts/commit/0a26eaafd6ad234046aa91c79a44a1d45f55724e)) + +## 6.9.1 (2021-10-15) + +* [bitnami/etcd] Release 6.9.1 updating components versions 
([d3c5418](https://github.com/bitnami/charts/commit/d3c5418a2b826dc89c213c318c580bff8a0493c8)) + +## 6.9.0 (2021-10-08) + +* [bitnami/etcd] Add support for customizing authentication token type (#7735) ([9f9d8aa](https://github.com/bitnami/charts/commit/9f9d8aa887608e39aaab4ca1a80677605825b888)), closes [#7735](https://github.com/bitnami/charts/issues/7735) +* [bitnami/several] Regenerate README tables ([ff170d1](https://github.com/bitnami/charts/commit/ff170d10f8aa6dae0f1e5c3f7d1c69fcec96b731)) + +## 6.8.4 (2021-09-24) + +* [bitnami/*] Generate READMEs with new generator version (#7614) ([e5ab2e6](https://github.com/bitnami/charts/commit/e5ab2e6ecdd6bce800863f154cda524ff9f6c117)), closes [#7614](https://github.com/bitnami/charts/issues/7614) +* [bitnami/etcd] feat: allow reuse startFromSnapshot volume in the disasterRecovery (#7511) ([b307202](https://github.com/bitnami/charts/commit/b3072027ca3ce7f9f0e584f4f1c6935752052b7b)), closes [#7511](https://github.com/bitnami/charts/issues/7511) + +## 6.8.3 (2021-09-24) + +* [bitnami/etcd] Release 6.8.3 updating components versions ([e5c193e](https://github.com/bitnami/charts/commit/e5c193e68bcf1982031507d764bd7fca55e96680)) + +## 6.8.2 (2021-09-17) + +* [bitnami/etcd] fix snapshots directory ownership when security context is enabled (#7498) ([831eed7](https://github.com/bitnami/charts/commit/831eed7501012164c5f65c2188dec19662900870)), closes [#7498](https://github.com/bitnami/charts/issues/7498) + +## 6.8.1 (2021-09-15) + +* [bitnami/several] Regenerate README tables ([c01cbe5](https://github.com/bitnami/charts/commit/c01cbe5e3bfe8a5f8545107f9c8d99a58d0e6832)) +* fix kubectl exec example in chart notes (#7496) ([971c4a4](https://github.com/bitnami/charts/commit/971c4a44a671f584fea9bbae98a29a7a0a3e6c64)), closes [#7496](https://github.com/bitnami/charts/issues/7496) + +## 6.8.0 (2021-09-07) + +* [bitnami/etcd] add Prometheus podmonitor relabelings parameter (#7399) 
([9e15549](https://github.com/bitnami/charts/commit/9e15549401f391b8c7db115a2e628431e6dc469c)), closes [#7399](https://github.com/bitnami/charts/issues/7399) +* [bitnami/several] Regenerate README tables ([8c2dfde](https://github.com/bitnami/charts/commit/8c2dfde7724141adfb90f0fa6bb97bf9acd4d14e)) + +## 6.7.0 (2021-09-02) + +* [bitnami/etcd] Adding the possibility to disable service (#7371) ([91e80b3](https://github.com/bitnami/charts/commit/91e80b3bfd35e27aad91597b51dee5a19483580d)), closes [#7371](https://github.com/bitnami/charts/issues/7371) + +## 6.6.2 (2021-09-01) + +* [bitnami/etcd] add service.clusterIP parameter (#7356) ([78508eb](https://github.com/bitnami/charts/commit/78508eb2031d9bcac4f20750f3ada56806b4db0f)), closes [#7356](https://github.com/bitnami/charts/issues/7356) +* [bitnami/several] Regenerate README tables ([da2513b](https://github.com/bitnami/charts/commit/da2513bf0a33819f3b1151d387c631a9ffdb03e2)) + +## 6.6.1 (2021-08-25) + +* [bitnami/etcd] Release 6.6.1 updating components versions ([4a34868](https://github.com/bitnami/charts/commit/4a348683e66a53fe4d176961b4317f65256d5ed2)) + +## 6.6.0 (2021-08-25) + +* [bitnami/etcd] Add support for custom terminationGracePeriodSeconds (#7307) ([e880129](https://github.com/bitnami/charts/commit/e88012920074cf8d3d759b0b66297b35ed18a05f)), closes [#7307](https://github.com/bitnami/charts/issues/7307) + +## 6.5.0 (2021-08-20) + +* [bitnami/etcd] add support for network policy (#7205) ([1e24ddb](https://github.com/bitnami/charts/commit/1e24ddb29c8541e046a7598cc1c6b23e1b08d89b)), closes [#7205](https://github.com/bitnami/charts/issues/7205) + +## 6.4.1 (2021-08-18) + +* [bitnami/etcd] Release 6.4.1 updating components versions ([2e0bfc1](https://github.com/bitnami/charts/commit/2e0bfc11578421d7456048f014dd9c83ed5bf13d)) + +## 6.4.0 (2021-08-17) + +* [bitnami/etcd] Add existingSecretPasswordKey field in auth.rbac (#7212) 
([5baf3cf](https://github.com/bitnami/charts/commit/5baf3cf20fcdbf1399178c5c51e34135665baeae)), closes [#7212](https://github.com/bitnami/charts/issues/7212) + +## 6.3.4 (2021-08-17) + +* [bitnami/etcd] use existingSecret parameters as a template (#7209) ([2bfeaef](https://github.com/bitnami/charts/commit/2bfeaefbac4a91d07c3d78f658999781b2c3866d)), closes [#7209](https://github.com/bitnami/charts/issues/7209) +* [bitnami/several] Update READMEs (#7108) ([44961d9](https://github.com/bitnami/charts/commit/44961d9cdfae1b0d06808124c4b47e8adc3de146)), closes [#7108](https://github.com/bitnami/charts/issues/7108) + +## 6.3.3 (2021-07-30) + +* [bitnami/etcd] Release 6.3.3 updating components versions ([ba6f835](https://github.com/bitnami/charts/commit/ba6f8356e725a8342fe738a3b73ae40d5488b2ad)) + +## 6.3.2 (2021-07-30) + +* [bitnami/etcd] Release 6.3.2 updating components versions ([c2cf742](https://github.com/bitnami/charts/commit/c2cf742d7be0e8862d62882091ec4da9e8960667)) + +## 6.3.1 (2021-07-27) + +* [bitnami/several] Fix default values and regenerate README (#7024) ([4c86733](https://github.com/bitnami/charts/commit/4c867335c5c9c5aba041868df16ebb8f64ac68bd)), closes [#7024](https://github.com/bitnami/charts/issues/7024) + +## 6.3.0 (2021-07-27) + +* [bitnami/several] Add diagnostic mode (#7012) ([f1344b0](https://github.com/bitnami/charts/commit/f1344b0361c5a93bf971d08f0fc64d3c8588cbf9)), closes [#7012](https://github.com/bitnami/charts/issues/7012) + +## 6.2.11 (2021-07-19) + +* [bitnami/*] Adapt values.yaml of Elasticsearch, etcd and external-dns charts (#6822) ([a5d80d6](https://github.com/bitnami/charts/commit/a5d80d61b044023eb41d0dd187fe71a16515ab2d)), closes [#6822](https://github.com/bitnami/charts/issues/6822) + +## 6.2.10 (2021-07-17) + +* [bitnami/etcd] Release 6.2.10 updating components versions ([037d1b9](https://github.com/bitnami/charts/commit/037d1b9558b9f4c32350c10428e6045b59318e86)) + +## 6.2.9 (2021-06-17) + +* [bitnami/etcd] Release 6.2.9 updating 
components versions ([8e9ee29](https://github.com/bitnami/charts/commit/8e9ee2989f0b2b2ba04bff987457f4aa0f55dd7b)) + +## 6.2.8 (2021-06-15) + +* [bitnami/etcd] Release 6.2.8 updating components versions ([324afd9](https://github.com/bitnami/charts/commit/324afd9caae092abab7e5c9ed9c0e4fc92120b2e)) + +## 6.2.7 (2021-06-11) + +* [bitnami/etcd] Release 6.2.7 updating components versions ([0c73128](https://github.com/bitnami/charts/commit/0c7312867be421012313b24002cf6cd5d2f2ad8e)) + +## 6.2.6 (2021-05-28) + +* [bitnami/etcd] Release 6.2.6 updating components versions ([8cdea9d](https://github.com/bitnami/charts/commit/8cdea9dd1b093f20901840f16e510ceb823b73ac)) + +## 6.2.5 (2021-05-23) + +* [bitnami/etcd] Release 6.2.5 updating components versions ([976a204](https://github.com/bitnami/charts/commit/976a2041126f5780c97eb2c146cb277774266098)) + +## 6.2.4 (2021-05-14) + +* [bitnami/etcd] Release 6.2.4 updating components versions ([6c2ec2b](https://github.com/bitnami/charts/commit/6c2ec2b69beb961e708f41f03ab18ffdebf3fa95)) + +## 6.2.3 (2021-04-16) + +* T39353 Updated links (#6128) ([9d5aa6e](https://github.com/bitnami/charts/commit/9d5aa6ef8af330126610c45e9c28fb0d312c54f1)), closes [#6128](https://github.com/bitnami/charts/issues/6128) + +## 6.2.2 (2021-04-15) + +* [bitnami/etcd] Release 6.2.2 updating components versions ([f04d933](https://github.com/bitnami/charts/commit/f04d9336cf80090d20e9d9491589fb3dd4b758e6)) + +## 6.2.1 (2021-04-12) + +* [bitnami/etcd] Add image pull secrets to cronjob (#6071) ([ca6416a](https://github.com/bitnami/charts/commit/ca6416a749421e8b062ae09134d890d00299000c)), closes [#6071](https://github.com/bitnami/charts/issues/6071) + +## 6.2.0 (2021-04-09) + +* [bitnami/etcd] Service account specification (#6055) ([c1a4703](https://github.com/bitnami/charts/commit/c1a470352f93b86096b1731bd816d8632e51987c)), closes [#6055](https://github.com/bitnami/charts/issues/6055) + +## 6.1.5 (2021-04-07) + +* [bitnami/etcd] Fix configmap template (#6025) 
([2fe5d38](https://github.com/bitnami/charts/commit/2fe5d381ff0a1bd9e4cd08fd8629c67e30bd2a29)), closes [#6025](https://github.com/bitnami/charts/issues/6025) + +## 6.1.4 (2021-04-06) + +* Etcd init from snapshot (#6013) ([570be78](https://github.com/bitnami/charts/commit/570be784025585e5837b0888c10e7e76dd017569)), closes [#6013](https://github.com/bitnami/charts/issues/6013) + +## 6.1.3 (2021-03-18) + +* Quote retention env variable values (#5831) ([65747e8](https://github.com/bitnami/charts/commit/65747e81bc7d8a84c4ef7940bfb9b66da77184cc)), closes [#5831](https://github.com/bitnami/charts/issues/5831) + +## 6.1.2 (2021-03-16) + +* [bitnami/etcd] Release 6.1.2 updating components versions ([182114e](https://github.com/bitnami/charts/commit/182114e06decb91fa3eb07ee0cf14b4cf74da724)) + +## 6.1.1 (2021-03-15) + +* [bitnami/etcd] Service: Fix clientPortNameOverride and peerPortNameOverride. (#5786) ([4a2f19f](https://github.com/bitnami/charts/commit/4a2f19f38b7aa745a88889bd5ac99c03210255c2)), closes [#5786](https://github.com/bitnami/charts/issues/5786) + +## 6.1.0 (2021-03-15) + +* [bitnami/chart] Adding parameters to enable and manipulate auto-compaction feature of etcd (#5756) ([43fc0df](https://github.com/bitnami/charts/commit/43fc0dfd346b05aaa1934d08e5b4a0763581b417)), closes [#5756](https://github.com/bitnami/charts/issues/5756) +* Update README.md ([7e84fee](https://github.com/bitnami/charts/commit/7e84feea8ebb06542124d14be3f1ba1e2c41fa3e)) +* Update README.md ([44f1276](https://github.com/bitnami/charts/commit/44f1276c0b2dc6f760700bb14b475dac934e1e57)) + +## 6.0.0 (2021-03-12) + +* [bitnami/etcd] Major version: refactoring (#5682) ([5741a6d](https://github.com/bitnami/charts/commit/5741a6ded476bb88930c1f031f83890b59d26a21)), closes [#5682](https://github.com/bitnami/charts/issues/5682) + +## 5.6.2 (2021-03-04) + +* [bitnami/*] Remove minideb mentions (#5677) ([870bc4d](https://github.com/bitnami/charts/commit/870bc4dba1fc3aa55dd157da6687b25e8d352206)), closes 
[#5677](https://github.com/bitnami/charts/issues/5677) + +## 5.6.1 (2021-02-10) + +* [bitnami/*] Add notice regarding parameters immutability after chart installation (#4853) ([5f09573](https://github.com/bitnami/charts/commit/5f095734f92555dec7cd0e3ee961f315eac170ff)), closes [#4853](https://github.com/bitnami/charts/issues/4853) +* [bitnami/etcd] Release 5.6.1 updating components versions ([aecde05](https://github.com/bitnami/charts/commit/aecde055759752a922a2f03585e4c395ed48f94c)) + +## 5.6.0 (2021-01-28) + +* [bitnami/etcd] Add hostAliases (#5238) ([c36935d](https://github.com/bitnami/charts/commit/c36935d1135ec159dc8ea25dc1da1897307f6307)), closes [#5238](https://github.com/bitnami/charts/issues/5238) + +## 5.5.2 (2021-01-25) + +* [bitnami/*] Unify icons in Chart.yaml and add missing fields (#5206) ([0462921](https://github.com/bitnami/charts/commit/0462921418ca8d54308b7466197a6d53ffae4628)), closes [#5206](https://github.com/bitnami/charts/issues/5206) + +## 5.5.1 (2021-01-19) + +* [bitnami/*] Change helm version in the prerequisites (#5090) ([c5e67a3](https://github.com/bitnami/charts/commit/c5e67a388743cbee28439d2cabca27884b9daf97)), closes [#5090](https://github.com/bitnami/charts/issues/5090) +* [bitnami/etcd] Drop values-production.yaml support (#5103) ([5cd0f00](https://github.com/bitnami/charts/commit/5cd0f00ca12bd7d6a70d31c82045535d4f4945a2)), closes [#5103](https://github.com/bitnami/charts/issues/5103) + +## 5.5.0 (2021-01-11) + +* [bitnami/etcd] Reuse startFromSnapshot volume in the disasterRecovery mode (#4940) ([2e05d2a](https://github.com/bitnami/charts/commit/2e05d2ab62cfef5105c77e5d66333c2314f6b089)), closes [#4940](https://github.com/bitnami/charts/issues/4940) + +## 5.4.2 (2021-01-04) + +* [bitnami/etcd] Fix port in concatenation of endpoints (#4875) ([ffdb62e](https://github.com/bitnami/charts/commit/ffdb62e36fbcbc83beb5932170e87d828bf5fc54)), closes [#4875](https://github.com/bitnami/charts/issues/4875) + +## 5.4.1 (2020-12-26) + +* 
[bitnami/etcd] Release 5.4.1 updating components versions ([18e5e47](https://github.com/bitnami/charts/commit/18e5e478bbb581c0a6649021384ecc53485d8039)) + +## 5.4.0 (2020-12-18) + +* [bitnami/etcd] Allow externalIPs in svc (#4770) ([5ee088e](https://github.com/bitnami/charts/commit/5ee088e859c6553e6ab64f81d2d2c9ec9b732d84)), closes [#4770](https://github.com/bitnami/charts/issues/4770) + +## 5.3.2 (2020-12-17) + +* [bitnami/*] fix typos (#4699) ([49adc63](https://github.com/bitnami/charts/commit/49adc63b672da976c55af2e077aa5648a357b77f)), closes [#4699](https://github.com/bitnami/charts/issues/4699) +* [bitnami/etcd] Add publishNotReadyAddresses: true to headless service (#4748) ([bfc6ae7](https://github.com/bitnami/charts/commit/bfc6ae742227e794265f80059788ef97b385e0bd)), closes [#4748](https://github.com/bitnami/charts/issues/4748) + +## 5.3.1 (2020-12-11) + +* [bitnami/*] Update dependencies (#4694) ([2826c12](https://github.com/bitnami/charts/commit/2826c125b42505f28431301e3c1bbe5366e47a01)), closes [#4694](https://github.com/bitnami/charts/issues/4694) + +## 5.3.0 (2020-12-01) + +* [bitnami/etcd] Add `statefulsetLabels` and `podLabels` (#4518) ([6ff4b1d](https://github.com/bitnami/charts/commit/6ff4b1d9a1ad1fb89a8d1508de036d21985c1b32)), closes [#4518](https://github.com/bitnami/charts/issues/4518) + +## 5.2.1 (2020-11-26) + +* [bitnami/etcd] Release 5.2.1 updating components versions ([9aeb6e0](https://github.com/bitnami/charts/commit/9aeb6e0b35be9b68d9bf533d2df28de79f3f2e7f)) + +## 5.2.0 (2020-11-25) + +* [bitnami/*] Affinity based on common presets (ii) (#4472) ([934259f](https://github.com/bitnami/charts/commit/934259f78127c53c747dfff5e5df9f67738ec79c)), closes [#4472](https://github.com/bitnami/charts/issues/4472) + +## 5.1.0 (2020-11-13) + +* [bitnami/etcd] Allow client/peer port name overrides for custom SRV domains (#4147) ([039a831](https://github.com/bitnami/charts/commit/039a8314982d80b12b700a2bcb5778d259a4dbad)), closes 
[#4147](https://github.com/bitnami/charts/issues/4147) + +## 5.0.1 (2020-11-11) + +* [bitnami/etcd] Remove unused etcd.dataDir template function (#4254) ([2f9ee67](https://github.com/bitnami/charts/commit/2f9ee67318847d1df3559005f97420723ad374dd)), closes [#4254](https://github.com/bitnami/charts/issues/4254) + +## 5.0.0 (2020-11-10) + +* [bitnami/etcd] Major version. Adapt Chart to apiVersion: v2 (#4283) ([7b380b6](https://github.com/bitnami/charts/commit/7b380b61491aa4c3ea7b34b1a0693bca6338d610)), closes [#4283](https://github.com/bitnami/charts/issues/4283) + +## 4.12.2 (2020-11-04) + +* [bitnami/*] Include link to Troubleshootin guide on README.md (#4136) ([c08a20e](https://github.com/bitnami/charts/commit/c08a20e3db004215383004ff023a73fcc2522e72)), closes [#4136](https://github.com/bitnami/charts/issues/4136) +* [bitnami/etcd] Disable metrics even if podAnnotations are provided (#4146) ([91ca0df](https://github.com/bitnami/charts/commit/91ca0df4a66ae3326dd12b9021431fb12e7892ff)), closes [#4146](https://github.com/bitnami/charts/issues/4146) + +## 4.12.1 (2020-10-22) + +* [bitnami/etcd] Release 4.12.1 updating components versions ([94e0dd6](https://github.com/bitnami/charts/commit/94e0dd6a25aaa395fcb2c219be9120beb8752254)) + +## 4.12.0 (2020-10-14) + +* [bitnami/etcd] Allow alternate TLS secret filenames (#3902) ([e901dd8](https://github.com/bitnami/charts/commit/e901dd893611f1d492318425b2fc548f8b1e2f0b)), closes [#3902](https://github.com/bitnami/charts/issues/3902) + +## 4.11.1 (2020-09-22) + +* [bitnami/etcd] Release 4.11.1 updating components versions ([cd8ff11](https://github.com/bitnami/charts/commit/cd8ff11cfb14d04f2027c99fc9631a5c63f3bbda)) + +## 4.11.0 (2020-09-11) + +* [bitnami/etcd] add loadBalancerSourceRanges (#3637) ([975fe17](https://github.com/bitnami/charts/commit/975fe174c4f0419d6096730e5a6b571e550d611d)), closes [#3637](https://github.com/bitnami/charts/issues/3637) +* [bitnami/metrics-server] Add source repo (#3577) 
([1ed12f9](https://github.com/bitnami/charts/commit/1ed12f96af75322b46afdb2b3d9907c11b13f765)), closes [#3577](https://github.com/bitnami/charts/issues/3577) + +## 4.10.1 (2020-08-26) + +* [bitnami/etcd] Release 4.10.1 updating components versions ([996649f](https://github.com/bitnami/charts/commit/996649f2e91eaaa738e7f513f87570b0056366fc)) + +## 4.10.0 (2020-08-21) + +* [bitnami/etcd] Add persistent volume claim selector (#3480) ([0e10b5f](https://github.com/bitnami/charts/commit/0e10b5fa2924a10d8672c58ac97c7f349601f0e0)), closes [#3480](https://github.com/bitnami/charts/issues/3480) + +## 4.9.5 (2020-08-20) + +* [bitnami/etcd] Release 4.9.5 updating components versions ([ac3ee4a](https://github.com/bitnami/charts/commit/ac3ee4a37ab85887e363c540edf258f96853a33f)) + +## 4.9.4 (2020-08-11) + +* [bitnami/etcd] Fix typo in values file (#3378) ([0fdec44](https://github.com/bitnami/charts/commit/0fdec44267ad4c8df6c416bf387bb4435ad1c91a)), closes [#3378](https://github.com/bitnami/charts/issues/3378) + +## 4.9.3 (2020-08-05) + +* [bitnami/etcd] Release 4.9.3 updating components versions ([8476d54](https://github.com/bitnami/charts/commit/8476d541d1f784224f9f637d0e37b9a2015c5daf)) + +## 4.9.2 (2020-07-31) + +* [bitnami/*] Fix TL;DR typo in READMEs (#3280) ([3d7ab40](https://github.com/bitnami/charts/commit/3d7ab406fecd64f1af25f53e7d27f03ec95b29a4)), closes [#3280](https://github.com/bitnami/charts/issues/3280) +* [bitnami/etcd] Fix etcd deployment when disabling useAutoTLS in client auth configuration (#3279) ([adb3f6b](https://github.com/bitnami/charts/commit/adb3f6bd491be3f9abe2284fbfc1017e46e5958b)), closes [#3279](https://github.com/bitnami/charts/issues/3279) + +## 4.9.1 (2020-07-27) + +* [bitnami/etcd] Fix issue when removing pod not being reattatched to the cluster properly (#3212) ([7734c98](https://github.com/bitnami/charts/commit/7734c98b985eb065700fe1e769287edfd8dd1b0b)), closes [#3212](https://github.com/bitnami/charts/issues/3212) + +## 4.9.0 (2020-07-23) + 
+* [bitnami/etcd] Ensure ETCD_DATA_DIR permissions are set to 700 (#3196) ([b331fa6](https://github.com/bitnami/charts/commit/b331fa6b61e91bef44e8d6c60eda5c3f6c4ff97a)), closes [#3196](https://github.com/bitnami/charts/issues/3196) + +## 4.8.14 (2020-07-18) + +* [bitnami/etcd] Release 4.8.14 updating components versions ([6d58840](https://github.com/bitnami/charts/commit/6d588407d793d8f59e0594d5e005ab6a51d3827b)) + +## 4.8.13 (2020-07-16) + +* [bitnami/etcd] Fix issue on snapshooting cronjob (#3134) ([bbe0baa](https://github.com/bitnami/charts/commit/bbe0baad2e35e2778925eb6fd25b95560d5e2aa4)), closes [#3134](https://github.com/bitnami/charts/issues/3134) + +## 4.8.12 (2020-07-16) + +* [bitnami/etcd] Release 4.8.12 updating components versions ([6d05f38](https://github.com/bitnami/charts/commit/6d05f38d1c1767376830f53cd76188aa2fd5ff8d)) + +## 4.8.11 (2020-07-15) + +* [bitnami/etcd] Fix snapshotter job when there's only one replica (#3119) ([f234cc7](https://github.com/bitnami/charts/commit/f234cc7ff1c59950de819151b2da221d72ec24b6)), closes [#3119](https://github.com/bitnami/charts/issues/3119) + +## 4.8.10 (2020-07-10) + +* [bitnami/all] Add categories (#3075) ([63bde06](https://github.com/bitnami/charts/commit/63bde066b87a140fab52264d0522401ab3d63509)), closes [#3075](https://github.com/bitnami/charts/issues/3075) +* [bitnami/etcd] Release 4.8.10 updating components versions ([25540ec](https://github.com/bitnami/charts/commit/25540ecb8011121c39b8c20ab372980fde1b1502)) + +## 4.8.9 (2020-07-03) + +* [bitnami/etcd] Release 4.8.9 updating components versions ([65f0ed6](https://github.com/bitnami/charts/commit/65f0ed663d7bcbbca9f3fdf09efaaa55edb492d4)) + +## 4.8.8 (2020-07-03) + +* Fixing grep used to create member_id file (#3013) ([90af20f](https://github.com/bitnami/charts/commit/90af20ff79a6887bf8faa92ccd883fbaba182674)), closes [#3013](https://github.com/bitnami/charts/issues/3013) + +## 4.8.7 (2020-06-29) + +* [bitnami/etcd] Release 4.8.7 updating components 
versions ([5649a31](https://github.com/bitnami/charts/commit/5649a310a7ab020743dc3986c965c03c4f39ee04)) + +## 4.8.6 (2020-06-29) + +* [bitnami/etcd] Don't create hardlink for latest snapshot (#2948) ([28c35be](https://github.com/bitnami/charts/commit/28c35be40c60e691c307e2ef1b22b9abc9b76280)), closes [#2948](https://github.com/bitnami/charts/issues/2948) + +## 4.8.5 (2020-06-24) + +* [bitnami/etcd] Release 4.8.5 updating components versions ([dc009cd](https://github.com/bitnami/charts/commit/dc009cd6881fa15967bd9e96dc1415a5905d8a95)) + +## 4.8.4 (2020-06-16) + +* [bitnami/etcd] make cluster state variable configurable to fix https://github.com/bitnami/charts/iss ([bd7f575](https://github.com/bitnami/charts/commit/bd7f575ebad9c200cd8e90f9fb3e17451ce607f5)) + +## 4.8.3 (2020-06-15) + +* [bitnami/etcd] Release 4.8.3 updating components versions ([ded4ba8](https://github.com/bitnami/charts/commit/ded4ba8c88828b172975c54e399fd17a1b90a3c3)) + +## 4.8.2 (2020-05-21) + +* [bitnami/etcd] Release 4.8.2 updating components versions ([c061385](https://github.com/bitnami/charts/commit/c0613854f7e4d7e6fa963e34f2b8a188294b5ed5)) +* update bitnami/common to be compatible with helm v2.12+ (#2615) ([c7751eb](https://github.com/bitnami/charts/commit/c7751eb5764e468e1854b58a1b8491d2b13e0a4a)), closes [#2615](https://github.com/bitnami/charts/issues/2615) + +## 4.8.1 (2020-05-18) + +* [bitnami/etcd] Release 4.8.1 updating components versions ([acfd77f](https://github.com/bitnami/charts/commit/acfd77f3cd579ba5c22c1d0d1222d47ea3f873c9)) + +## 4.8.0 (2020-05-08) + +* [bitnami/etcd] Add PodDisruptionBudget (#2545) ([78c98d3](https://github.com/bitnami/charts/commit/78c98d3ef60ced04a064cfa859334d42583c9562)), closes [#2545](https://github.com/bitnami/charts/issues/2545) + +## 4.7.5 (2020-04-22) + +* [bitnami/etcd] Release 4.7.5 updating components versions ([eab1be2](https://github.com/bitnami/charts/commit/eab1be28f69e2e6de9f5550d25bc4e96c78231de)) + +## 4.7.4 (2020-04-16) + +* 
[bitnami/etcd] Release 4.7.4 updating components versions ([3876458](https://github.com/bitnami/charts/commit/3876458b0618282037c19f0f697fc1e2739554a4)) + +## 4.7.3 (2020-04-02) + +* [bitnami/etcd] Release 4.7.3 updating components versions ([dbf0ed2](https://github.com/bitnami/charts/commit/dbf0ed29a77b062cf4f7a996f40ceaccd7880d53)) + +## 4.7.2 (2020-03-29) + +* [bitnami/etcd] Release 4.7.2 updating components versions ([a3940dd](https://github.com/bitnami/charts/commit/a3940ddba765104cc88c70b987d7fd71c7b3f6ac)) + +## 4.7.1 (2020-03-26) + +* [bitnami/etcd] Release 4.7.1 updating components versions ([4917220](https://github.com/bitnami/charts/commit/4917220baa6e3184474714447fbae0a67daba1e1)) + +## 4.7.0 (2020-03-25) + +* [bitnami/etcd] add priority class name option to etcd (#2126) ([8d87e6f](https://github.com/bitnami/charts/commit/8d87e6f2169b07ab16455cd2e06eb944c78c5a66)), closes [#2126](https://github.com/bitnami/charts/issues/2126) + +## 4.6.4 (2020-03-20) + +* [bitnami/etcd] Release 4.6.4 updating components versions ([bcfc42a](https://github.com/bitnami/charts/commit/bcfc42a8450fdc55db3669c1b1f595531c04bc6f)) + +## 4.6.3 (2020-03-19) + +* [bitnami/etcd] Release 4.6.3 updating components versions ([8cca14f](https://github.com/bitnami/charts/commit/8cca14f8e01aa259d06172f6d26df79bc9c098a7)) + +## 4.6.2 (2020-03-12) + +* [bitnami/etcd] Release 4.6.2 updating components versions ([e443986](https://github.com/bitnami/charts/commit/e44398698367c8064b9bf0e20a5ba2b68d655be9)) + +## 4.6.1 (2020-03-11) + +* Move charts from upstreamed folder to bitnami (#2032) ([a0e44f7](https://github.com/bitnami/charts/commit/a0e44f7d6a10b8b5643186130ea420887cb72c7c)), closes [#2032](https://github.com/bitnami/charts/issues/2032) + +## 4.6.0 (2020-03-02) + +* [bitnami/etcd] keep snapshot history (#1920) ([55bb26b](https://github.com/bitnami/charts/commit/55bb26b232bc0cd8d0aa6f369f1ca2de72e03784)), closes [#1920](https://github.com/bitnami/charts/issues/1920) + +## 4.5.1 
(2020-02-26) + +* [bitnami/etcd] Release 4.5.1 updating components versions ([67827b5](https://github.com/bitnami/charts/commit/67827b50c51b70c09dccfdbcc15566aeea0c9ce9)) + +## 4.5.0 (2020-02-25) + +* [bitnami/etcd] fix save-snapshot script (#1958) ([8e451ec](https://github.com/bitnami/charts/commit/8e451ec70028cc4766b68bb2a7a141ddf19f35e6)), closes [#1958](https://github.com/bitnami/charts/issues/1958) +* [bitnami/etcd] Fix showing ETCD_ROOT_PASSWORD in the logs (#1909) ([5534f5e](https://github.com/bitnami/charts/commit/5534f5eee7e03b06679909fe43b24e415b96f16a)), closes [#1909](https://github.com/bitnami/charts/issues/1909) + +## 4.4.14 (2020-02-11) + +* [bitnami/several] Adapt READMEs and helpers to Helm 3 (#1911) ([40ee57c](https://github.com/bitnami/charts/commit/40ee57cf5164717357e1627b55bf25f59c40fbd1)), closes [#1911](https://github.com/bitnami/charts/issues/1911) + +## 4.4.13 (2020-02-11) + +* [bitnami/etcd] Release 4.4.13 updating components versions ([c4f47a0](https://github.com/bitnami/charts/commit/c4f47a05568abdf5e8fcccbacbea50b76f0b01d2)) + +## 4.4.12 (2020-02-10) + +* [bitnami/several] Replace stretch by buster in minideb secondary containers (#1900) ([678febc](https://github.com/bitnami/charts/commit/678febc237594606f2505ba98c651a8ab8f484ab)), closes [#1900](https://github.com/bitnami/charts/issues/1900) + +## 4.4.11 (2020-01-23) + +* [bitnami/etcd] Release 4.4.11 updating components versions ([eb1cb9f](https://github.com/bitnami/charts/commit/eb1cb9f343393c81f8e3f5dda3ae0b2ad5400ab3)) + +## 4.4.10 (2020-01-14) + +* [bitnami/etcd] Release 4.4.10 updating components versions ([cf60876](https://github.com/bitnami/charts/commit/cf60876b0d10f39e8e828dadc4e7ee120b6b09b0)) + +## 4.4.9 (2020-01-10) + +* [bitnami/etcd] Release 4.4.9 updating components versions ([dfe3d5b](https://github.com/bitnami/charts/commit/dfe3d5b6d993c31c122c295585207143a549b8c7)) + +## 4.4.8 (2020-01-02) + +* [bitnami/etcd] support snapshot restore on single node deployment 
([7d18cf4](https://github.com/bitnami/charts/commit/7d18cf4835c92de27cc31da388f3d93cdedb172a)), closes [#1784](https://github.com/bitnami/charts/issues/1784) + +## 4.4.7 (2019-12-18) + +* [bitnami/etcd] Use auto-generated TLS certificates when ETCD_AUTO_TLS is true ([550a1d6](https://github.com/bitnami/charts/commit/550a1d6b253f93e34c7ad7818d6101dc45b34524)) + +## 4.4.6 (2019-12-13) + +* use BITNAMI_DEBUG te enable bash script debugging ([de6f9bc](https://github.com/bitnami/charts/commit/de6f9bcf033db6de26444a3af113ed4538d2a22d)) + +## 4.4.5 (2019-11-27) + +* [bitnami/*] Remove reference to Cassandra on non-cassandra charts ([8418d32](https://github.com/bitnami/charts/commit/8418d3228b815168330721ede2721e5cfeb25e96)) + +## 4.4.4 (2019-11-22) + +* [bitnami/etcd] Fix regexp to detect removed members from cluster ([e4d820e](https://github.com/bitnami/charts/commit/e4d820e34469595d3946da8e7b8709191b6fd01d)) +* [bitnami/etcd] Update components versions ([5ff0985](https://github.com/bitnami/charts/commit/5ff0985ee51e140e695a485db19495e433577b2e)) +* fix updateStrategy in etcd statefulset ([0cf3548](https://github.com/bitnami/charts/commit/0cf3548f94e499c5abf9ca66f44b182c269a9d70)) +* Update README.md ([237c982](https://github.com/bitnami/charts/commit/237c9827733976892cc162d3936d6d5512c4d6e1)) + +## 4.4.3 (2019-11-18) + +* [bitnami/elasticsearch] Lint chart ([e5776f9](https://github.com/bitnami/charts/commit/e5776f95aab2cceb00a53d3ba5d8c497f83965ba)) + +## 4.4.2 (2019-11-11) + +* [bitnami/etcd] etcd auto-recover cluster ([0877afe](https://github.com/bitnami/charts/commit/0877afe268683e22c06387957187b4079de244ca)) + +## 4.4.1 (2019-11-01) + +* [bitnami/etcd] Fix issue with restarting pods on single-node clusters ([3f085a7](https://github.com/bitnami/charts/commit/3f085a71be9758ad048217f490daaf645a516ff1)) + +## 4.4.0 (2019-10-25) + +* [bitnami/etcd] adds servicemonitor support ([7c4bc74](https://github.com/bitnami/charts/commit/7c4bc743b8b09d84185dcf11586fc9578a209d91)) + 
+## 4.3.13 (2019-10-24) + +* Adapt README in charts (I) ([eb80a7e](https://github.com/bitnami/charts/commit/eb80a7eadd32e55caaa9aa3b1b7741e02f2648c7)) +* Fix links because of section renaming ([8e6fa3b](https://github.com/bitnami/charts/commit/8e6fa3bf7e3198954b6af507cf143fd4870c1c33)) + +## 4.3.11 (2019-10-23) + +* [bitnami/etcd] Release 4.3.11 updating components versions ([07bf653](https://github.com/bitnami/charts/commit/07bf653a55e3651e093104dbdce8fa7acf940935)) + +## 4.3.10 (2019-10-15) + +* [bitnami/etcd] Update prerequisites ([56bccc4](https://github.com/bitnami/charts/commit/56bccc4c9b1e1ab64e8f02258f115fe251314029)) + +## 4.3.9 (2019-10-11) + +* [bitnami/etcd] Release 4.3.9 updating components versions ([7466ea7](https://github.com/bitnami/charts/commit/7466ea750e8b59cb401081ca903b029ef42332c4)) +* Fix conflict between ETCDCTL_ENDPOINTS env var and --endpoints cmdline parameter. Add debug output f ([de0ebfa](https://github.com/bitnami/charts/commit/de0ebfaeb3108a650f8610dce4ff3e5d163c80f6)) +* Remove extra blank lines and remove unnecessary 'exec' command. 
([bfb0a28](https://github.com/bitnami/charts/commit/bfb0a280b0f8bee052d008ad55017d6c3a4510a5)) + +## 4.3.8 (2019-10-01) + +* Remove duplicated section ([93ae67e](https://github.com/bitnami/charts/commit/93ae67e970d748162bda6df1f1272c74dfcb57c6)) + +## 4.3.7 (2019-09-20) + +* [bitnami/*] Update apiVersion on sts, deployments, daemonsets and podsecuritypolicies ([4dfac07](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0)) + +## 4.3.6 (2019-09-18) + +* [bitnami/etcd] Release 4.3.6 updating components versions ([463b475](https://github.com/bitnami/charts/commit/463b475365a9203f138db9801a24a10d8604a713)) + +## 4.3.5 (2019-09-02) + +* [bitnami/etcd] Release 4.3.5 updating components versions ([1d306af](https://github.com/bitnami/charts/commit/1d306afd39a42678910d2a7b91f574dcdc1db044)) +* Use stretch instead of buster to be consistent ([decd85f](https://github.com/bitnami/charts/commit/decd85ff9ee57beb656648918b1197b6d8b9d5af)) + +## 4.3.4 (2019-08-30) + +* [bitnami/*] Use buster instead of latest as minideb tag and adapt sysctl ([921e811](https://github.com/bitnami/charts/commit/921e81153c755c5e06b86fbc54841c115f6e10bb)) + +## 4.3.3 (2019-08-22) + +* [bitnami/*] Fix 'storageClass' macros ([f41c193](https://github.com/bitnami/charts/commit/f41c19310b0aba01be37217e530b678ae30a560f)) + +## 4.3.2 (2019-08-21) + +* [bitnami/etcd] Release 4.3.2 updating components versions ([8adc421](https://github.com/bitnami/charts/commit/8adc42168fce25e6028b2755dc181b92e52b4364)) + +## 4.3.1 (2019-08-21) + +* Refactor StorageClass template to support old Helm versions ([1d7f3df](https://github.com/bitnami/charts/commit/1d7f3df1250d8f3ba7b17add67de6515dd93f1e7)) +* Refactor storageClassTemplate ([1872215](https://github.com/bitnami/charts/commit/1872215effe0a6ff672537387684c8a97fb3093c)) + +## 4.3.0 (2019-08-19) + +* Add global variable to set the storage class to all of the Hekm Charts 
([cdb4bdc](https://github.com/bitnami/charts/commit/cdb4bdceda07e03f3902ec2796eab54d2c6f1650)) +* Update charts versions ([9459dbb](https://github.com/bitnami/charts/commit/9459dbbf98c5572f0b92cd3eef8e12ec83a48397)) + +## 4.2.3 (2019-08-14) + +* bumped chart fix version + added the secret as parameter and added an if statement for the volume an ([c8f8448](https://github.com/bitnami/charts/commit/c8f8448d6ef88ae5c239fa185363ef6a97420e18)), closes [#1359](https://github.com/bitnami/charts/issues/1359) +* fix #1359 ([cb7cef3](https://github.com/bitnami/charts/commit/cb7cef31c8eaf6b437eed88ef264e0bd3156044d)), closes [#1359](https://github.com/bitnami/charts/issues/1359) + +## 4.2.2 (2019-08-13) + +* Update README.md ([3ba1dbd](https://github.com/bitnami/charts/commit/3ba1dbd477d78067c76e44c3375777150cffa186)) +* Updated version numbers ([aa0294b](https://github.com/bitnami/charts/commit/aa0294b246340ed44d76f8cac59338ee685fab5e)) + +## 4.2.1 (2019-07-31) + +* [bitnami/etcd] Release 4.2.1 updating components versions ([d2e3ae1](https://github.com/bitnami/charts/commit/d2e3ae129036a9564de4f7c5b7acfa7671406875)) + +## 4.2.0 (2019-07-29) + +* [bitnami/etcd] Bumped version of the chart for my PR ([1c99d45](https://github.com/bitnami/charts/commit/1c99d453748f239c1b5ef736a3034ea63435ecb4)) +* [bitnami/etcd] Debug logging added for pod bash scripts ([c33621b](https://github.com/bitnami/charts/commit/c33621b06e3f379f3c32a9e9c9cf8d429db2ea53)) +* [bitnami/etcd] Fix readinessProbe, cluster can only bootstrap when all nodes come up simultaneously ([fd8e42b](https://github.com/bitnami/charts/commit/fd8e42b521beec40ce97f9c1b150b24bd30560e3)) +* Fix indentation ([c699e9f](https://github.com/bitnami/charts/commit/c699e9f2aed150351c8a7b21717b5a959ef2d2e3)) +* Fix livenessProbe ([bbe462c](https://github.com/bitnami/charts/commit/bbe462c3d5a70a78098f62b85f30b43fe678f606)) + +## 4.1.0 (2019-07-22) + +* Change etcd 
([9d01e0a](https://github.com/bitnami/charts/commit/9d01e0acbeab66dfe54bbcda1d53652cc1e383f9)) + +## 4.0.0 (2019-07-17) + +* Implement again #1283 changes but bumping the major version ([1ce079c](https://github.com/bitnami/charts/commit/1ce079c789a6c0f5174094af3ea6fb67b6c926fd)), closes [#1283](https://github.com/bitnami/charts/issues/1283) + +## 3.1.3 (2019-07-17) + +* Revert pull request #1283 ([8e5940a](https://github.com/bitnami/charts/commit/8e5940a9260dc722ae1a630a6b6e21df2502323f)), closes [#1283](https://github.com/bitnami/charts/issues/1283) + +## 3.1.2 (2019-07-11) + +* Standardize component.name & component.fullname functions ([775948e](https://github.com/bitnami/charts/commit/775948eb27ccc5599262002b71f4982cc2b2dc8d)) + +## 3.1.1 (2019-06-27) + +* [bitnami/etcd] Fix 'unbound variable' issue when RBAC is set to false ([e933e91](https://github.com/bitnami/charts/commit/e933e91cf36e6253cb2ed9d9a4d226640ae4e0a5)) +* Adapt 'is_disastrous_failure' code depending on wether disasterRecovery is enabled or not ([3ea6a19](https://github.com/bitnami/charts/commit/3ea6a1978d71f15f8682ddcc519fd371618ff471)) +* Add a function to decided whether to add new member or not ([3679c68](https://github.com/bitnami/charts/commit/3679c68e9f316556281649970ea76b38629dad43)) +* Add message to warn the users about being unable to recover the cluster ([4fe9a1b](https://github.com/bitnami/charts/commit/4fe9a1bde8eb4dbda70f63f3fbb053bff3556923)) +* Allow 'member remove' command to fail ([cf65d78](https://github.com/bitnami/charts/commit/cf65d78a882f8ed1c182a0292641c1840fc3a446)) +* Fix pod recovery when disaster recovery is not enabled ([4465239](https://github.com/bitnami/charts/commit/4465239abf10830e425662dc692a3a2fe7cec5a0)) +* Improve the comand to calculate ETCDCTL_ENDPOINTS ([d4d8a15](https://github.com/bitnami/charts/commit/d4d8a15260b8971865452374e4ed692c36bff21a)) +* Rebase from master 
([2a14ca4](https://github.com/bitnami/charts/commit/2a14ca4cba029529e33d88edd8458d295f76e062)) +* Support 'existing claims' for snapshotter pvc ([2199a18](https://github.com/bitnami/charts/commit/2199a18599d49dbfc8d98c7d23159f0271c0ef2d)) +* Support creating new cluster from existing snapshot ([8d1280b](https://github.com/bitnami/charts/commit/8d1280ba21af26f09c5a19c3774bbee2bb1c3c61)) +* Use a 'cronjob' for snapshooting ([0ba23d1](https://github.com/bitnami/charts/commit/0ba23d17ae9f3c909873ab7dc2aa8c95eca03b2e)) + +## 3.0.0 (2019-06-21) + +* [bitnami/etcd] Support auto disaster recovery ([860a00f](https://github.com/bitnami/charts/commit/860a00f111a93cecdebf56a53f0fd0e9fa3cc2cc)) + +## 2.4.0 (2019-06-25) + +* Add nameOverride and fullnameOverride documentation. ([ff56764](https://github.com/bitnami/charts/commit/ff567648c005878b3d1be21e5e496b0b3048dfe7)) +* Add nameOverride/fullnameOverride examples to values*.yaml and update version. ([473aa3c](https://github.com/bitnami/charts/commit/473aa3c41255e9b59fe5ddc2564c6bea68e8cb3d)) +* Use the popular 'fullname' template definition. 
([bff1e70](https://github.com/bitnami/charts/commit/bff1e70da072b72d75d2821ebf003c371b067a79)) + +## 3.0.0 (2019-06-21) + +* [bitnami/etcd] Support auto disaster recovery ([860a00f](https://github.com/bitnami/charts/commit/860a00f111a93cecdebf56a53f0fd0e9fa3cc2cc)) +* Changes in README ([7ac4ec0](https://github.com/bitnami/charts/commit/7ac4ec09a0113aae7d39173f52fb11c55f27cde5)) + +## 2.3.3 (2019-06-10) + +* bitnami/etcd: update to 3.3.13 ([370907e](https://github.com/bitnami/charts/commit/370907e9066162eba0333cadc611b0c3c1514265)) +* Bump version ([a672193](https://github.com/bitnami/charts/commit/a67219387480695cdd7e4dd2a7f5443ed8093a11)) + +## 2.3.2 (2019-06-10) + +* Unify sections ([c227027](https://github.com/bitnami/charts/commit/c227027c46af726ab4fcfd5cf4561ecfd1721117)) +* Use IfNotPresent as imagePullPolicy since we are using immutable tags ([53247da](https://github.com/bitnami/charts/commit/53247da385b3bed19f02505b059591869893ecbb)) + +## 2.3.1 (2019-06-07) + +* [bitnami/etcd] Unify and document production values ([22258c9](https://github.com/bitnami/charts/commit/22258c9aa2aaa80acb9a9a251d277c5f44bd799b)) + +## 2.3.0 (2019-06-06) + +* [bitnami/etcd] Provide a way to set 'GOMAXPROCS' ([67dac2f](https://github.com/bitnami/charts/commit/67dac2f944e3337c06acf316c550aa8e48162239)) +* Truncate etcd name to 63 chart instead 24 ([08aa621](https://github.com/bitnami/charts/commit/08aa621340aca2093b005e7be7e33bab14d2d5bb)) + +## 2.2.5 (2019-05-29) + +* Change syntax because of linter failing ([adfc357](https://github.com/bitnami/charts/commit/adfc35728c2a8a9def9e1897b3772d64df621354)) +* Fix https://github.com/helm/charts/pull/14199\#issuecomment-496883321 and support _sha256_ as an imm ([95957ea](https://github.com/bitnami/charts/commit/95957ea6430f28ec3593053afb0bfccb75703c79)) +* Use immutable tags in the main images ([17ca4f5](https://github.com/bitnami/charts/commit/17ca4f5c91da33da03f9e2d411fe5e004e825c4d)) + +## 2.2.4 
(2019-05-28) + +* Prepare immutable tags ([f9093c1](https://github.com/bitnami/charts/commit/f9093c1aaa9cde25a042c3d62f7873385447cbf7)) + +## 2.2.3 (2019-05-03) + +* bitnami/etcd: update to 3.3.13 ([d684f25](https://github.com/bitnami/charts/commit/d684f25666cd8e50081300528ceaef42f9557eb1)) + +## 2.2.2 (2019-03-18) + +* Set rollingUpdate to null when updateStrategy is Recreate ([1a45bba](https://github.com/bitnami/charts/commit/1a45bba6ce964b3e2b939fba0cdf063cf66e7586)) + +## 2.2.1 (2019-03-18) + +* Remove pullSecrets for metrics without image in the helpers ([195f2ed](https://github.com/bitnami/charts/commit/195f2ed80f7f656d512dc476f54894681101e0f1)) + +## 2.2.0 (2019-03-12) + +* Fix typo ([72460d9](https://github.com/bitnami/charts/commit/72460d9740a52a39da859f1d64042670186907ea)) +* Fix typo in helpers ([d1d60dc](https://github.com/bitnami/charts/commit/d1d60dce662f60dfe201b6c948dbc70bafe18432)) +* pullSecrets for production and metrics ([22d6e0b](https://github.com/bitnami/charts/commit/22d6e0b546ec6b222cc444ea3c02fd25d6f31dce)) + +## 2.1.0 (2019-03-11) + +* [bitnami/etcd] Add global imagePullSecrets to overwrite any other existing one ([1009174](https://github.com/bitnami/charts/commit/1009174d1ec303f58b7bd4b11af710ce30b4916b)) +* Apply requested changes ([9411481](https://github.com/bitnami/charts/commit/94114819e1ec39397eb74ddb4492570efb399cda)) +* Get rid of useConfigMap missing value ([7cf14c3](https://github.com/bitnami/charts/commit/7cf14c332aa0081c64561aa92e7430b30aebdd9d)) + +## 2.0.0 (2019-03-04) + +* [bitnami/etcd] Allow custom configuration via file or env vars ([ce32b0d](https://github.com/bitnami/charts/commit/ce32b0dac7e88db0d26289b44d85c8a71a3dbe04)) +* [bitnami/etcd] Fix etcd node 0 recovery from failure ([9368593](https://github.com/bitnami/charts/commit/9368593b899ada04d2c445b3e6da23e29256f672)) + +## 1.5.6 (2019-02-27) + +* Add appiVersion to Chart.yaml 
([08704a5](https://github.com/bitnami/charts/commit/08704a55fa287f8da2e680344163b65863959329)) + +## 1.5.5 (2019-02-21) + +* Update Chart.yaml ([aac1dfc](https://github.com/bitnami/charts/commit/aac1dfcdf7af591a986417bd1fb708100b11a463)) +* Update README.md ([ac35126](https://github.com/bitnami/charts/commit/ac3512603da1126280d370a8535f0203e2f78adf)) + +## 1.5.4 (2019-02-11) + +* chore(bitnami/etcd): version to 1.5.3 ([bae6229](https://github.com/bitnami/charts/commit/bae6229611e5f5274e5477766167d65e37ed55e2)) +* chore(bitnami/etcd): version to 1.5.4 ([498bdc2](https://github.com/bitnami/charts/commit/498bdc2a9dc2f91e46b1fffc9931a94b0899aa6f)) +* fix(bitnami/etcd): Sleep until cluster can respond before setting root ([8f8e731](https://github.com/bitnami/charts/commit/8f8e7319fc939e87675feb62fe73554630fb73fc)) + +## 1.5.3 (2019-02-07) + +* etcd: update to `3.3.12` ([b59b44a](https://github.com/bitnami/charts/commit/b59b44ad360a85e394318626d35412ca8f369ba3)) + +## 1.5.2 (2019-02-02) + +* chore(bitnami/etcd): version to 1.5.2 ([6380d49](https://github.com/bitnami/charts/commit/6380d49ad50edf28d09748ff5196efc891e60de2)) +* fix(bitnami/etc): revert PR #1006 ([23610d4](https://github.com/bitnami/charts/commit/23610d40d0ca6edfc961dca1c652439d6e75589f)), closes [#1006](https://github.com/bitnami/charts/issues/1006) [#1006](https://github.com/bitnami/charts/issues/1006) [#1040](https://github.com/bitnami/charts/issues/1040) +* fix(bitnami/etcd): right path for existingSecret ([e0bd1bd](https://github.com/bitnami/charts/commit/e0bd1bd3ef452ea256cda46309febef93050baef)), closes [#1041](https://github.com/bitnami/charts/issues/1041) +* fix(templates/statefulset.yaml): dnsBase path ([3bc9cf1](https://github.com/bitnami/charts/commit/3bc9cf16ed5f64be41ac46cb61a8efedaf6f8742)) +* Missing substitution of .svc.cluster.local for $dnsBase ([a99457b](https://github.com/bitnami/charts/commit/a99457b691f8b27c551ad9431e102bd143d1600f)) + +## 1.5.1 (2019-01-25) + +* Document the correct 
format for pullSecrets in READMEs ([9a2cf81](https://github.com/bitnami/charts/commit/9a2cf81399905b7b3190bbc35a8d554f94014ce8)) + +## 1.5.0 (2019-01-25) + +* etcd: Allow non-default service domains ([82f7884](https://github.com/bitnami/charts/commit/82f7884caddabf0c5c9bfa63b7db0b1b2b97fead)) + +## 1.4.3 (2019-01-12) + +* etcd: update to `3.3.11` ([fce09b2](https://github.com/bitnami/charts/commit/fce09b24f58af336f44a335e1d7d0876d27bd810)) + +## 1.4.2 (2019-01-10) + +* Fix single node etcd cluster with auth ([6e8a04c](https://github.com/bitnami/charts/commit/6e8a04c7284390a155b500b56fef5b33400707b3)) + +## 1.4.1 (2019-01-03) + +* [bitnami/etcd] inc chart version ([6f5c477](https://github.com/bitnami/charts/commit/6f5c4776e6e22b303a9d5416037aac2e607863db)) + +* [bitnami/etcd] start temporary etcd single node cluster for root password setting when start cluster ([6987b6c](https://github.com/bitnami/charts/commit/6987b6cd4d681682e1d668ee8e5d235857a8159c)) +* Apply bash style fixes ([a79bbf9](https://github.com/bitnami/charts/commit/a79bbf93e839f1826c6ea0b031da175efb8b78cd)) + +## 1.4.0 (2019-01-02) + +* [bitnami/etcd] Fix etcd after pod restart ([a6b7c15](https://github.com/bitnami/charts/commit/a6b7c1531120ba64b362cc8fc651b658dff5f54d)) + +## 1.3.1 (2018-11-21) + +* Bump chart version ([bf25b0f](https://github.com/bitnami/charts/commit/bf25b0f814da06f12743a0ae853a5233f3eaa157)) +* Fix README ([e574574](https://github.com/bitnami/charts/commit/e574574d466f2b02eb8983d773625d5a01136e43)) + +## 1.2.0 (2018-10-12) + +* [bitnami/etcd] Enable prometheus metrics ([a829979](https://github.com/bitnami/charts/commit/a8299797828f76ad68b74a94a8e02d61946e04a4)) + +## 1.2.1 (2018-10-17) + +* Add global registry option to Bitnami charts ([395ba08](https://github.com/bitnami/charts/commit/395ba08e2bc14ef28a0cae1fada97ed6cf2e777d)) +* Apply some suggestions ([24706a6](https://github.com/bitnami/charts/commit/24706a6163b75700c705f3021bb37790f95423c9)) +* 
Bump versions ([0cfd3f4](https://github.com/bitnami/charts/commit/0cfd3f421533a532c90438afa287bf46aa10413e)) +* Change logic to determine registry ([9ead294](https://github.com/bitnami/charts/commit/9ead294d5705f2646e8d3b70e14129d23c07bf8a)) +* Check if global is set ([dec26e5](https://github.com/bitnami/charts/commit/dec26e5d0b982905dde2a55fdf2285a7781a64cc)) +* Fix typo ([93170ac](https://github.com/bitnami/charts/commit/93170acc16e842e55aff7b7d944f7fbe025eee91)) +* Remove distro tags in charts ([427ac51](https://github.com/bitnami/charts/commit/427ac51cdf4de70f786563e1971a5d491d32ad54)) +* Reword and update kafka dependencies ([be6cbed](https://github.com/bitnami/charts/commit/be6cbedd27cea4c5c0e30ce70c9790c27ca1a0ec)) + +## 1.2.0 (2018-10-12) + +* [bitnami/etcd] Enable prometheus metrics ([a829979](https://github.com/bitnami/charts/commit/a8299797828f76ad68b74a94a8e02d61946e04a4)) + +## 1.1.6 (2018-10-10) + +* etcd: bump chart appVersion to `3.3.10` ([8ccb0a7](https://github.com/bitnami/charts/commit/8ccb0a7a27704cc18956b5b30f8fd6761024351b)) +* etcd: bump chart version to `1.1.6` ([9109fee](https://github.com/bitnami/charts/commit/9109fee781df43b13db5724e5d88bca1455bdb02)) +* etcd: update to `3.3.10-debian-9` ([29c8250](https://github.com/bitnami/charts/commit/29c8250b1dbd9b0a01c2d23bf597cbae1cf3d831)) + +## 1.1.5 (2018-10-05) + +* Add kubeapps text to charts READMEs ([2f6dc51](https://github.com/bitnami/charts/commit/2f6dc51ce6307d57bd8c20e929da23dd2adf22d5)) + +## 1.1.4 (2018-10-02) + +* Add ) ([728466a](https://github.com/bitnami/charts/commit/728466ac178560c67f2ede913140f50dc5136fd3)) +* Add affinity rules setting ability to bitnami/etcd. 
([8259e61](https://github.com/bitnami/charts/commit/8259e61d89ad1c1cb1f4773dfe4bbbab34f65e93)) +* Fix go template inside go template ([a140313](https://github.com/bitnami/charts/commit/a140313f4910d1366170415f0300729c22eda073)) + +## 1.1.3 (2018-09-27) + +* Bump etcd version ([21ff639](https://github.com/bitnami/charts/commit/21ff639d1a4ba915111e1386771f387bf2d81508)) +* Improve getting LoadBalancer address in Bitnami charts NOTES.txt ([a641728](https://github.com/bitnami/charts/commit/a64172812af8b11fac23be2fe7a66b1edb14c71f)) + +## 1.1.2 (2018-09-27) + +* etcd: bump chart appVersion to `3.3.9` ([9bb4db3](https://github.com/bitnami/charts/commit/9bb4db3490a9a2b882d220689d9cadf54f5fdc57)) +* etcd: bump chart version to `1.1.2` ([867b3f2](https://github.com/bitnami/charts/commit/867b3f217c50a01d03ee7f587b9dfb62cf78d6b5)) +* etcd: update to `3.3.9-debian-9` ([d41ab85](https://github.com/bitnami/charts/commit/d41ab856518cc96b24be052fbcd2c12ddad92b1c)) + +## 1.1.1 (2018-09-26) + +* [bitnami/etcd] fix svc template when passing loadBalancerIP property ([f1e4595](https://github.com/bitnami/charts/commit/f1e45950e9b3f124c94b377cdd65121c49a996ed)) + +## 1.1.0 (2018-09-24) + +* Fix scaling etcd nodes ([63fbf10](https://github.com/bitnami/charts/commit/63fbf10e254c9e45505f60b1249248255bbb3545)) + +## 1.0.0 (2018-09-21) + +* [bitnami/etcd] FIx chart not upgrading ([fbc436f](https://github.com/bitnami/charts/commit/fbc436f9e5410e6ae27b53348523959ea9cd29d6)) + +## 0.0.4 (2018-09-13) + +* Allow running etcd on a non-default namespace ([2030f4a](https://github.com/bitnami/charts/commit/2030f4a51050e2049d997ef3774df98ffd4445c6)) + +## 0.0.3 (2018-09-04) + +* Fix issue configuring etcd auth ([73562d8](https://github.com/bitnami/charts/commit/73562d8db46fcdc9921f177d40097c03190995df)) + +## 0.0.2 (2018-08-06) + +* Apply several suggestions from juan131 and javsalgar ([5be0e5d](https://github.com/bitnami/charts/commit/5be0e5dac5bde325c273f2e4ec5e099797e0d505)) +* Create helper 
template to return auth options for etcdctl command line ([9e950c9](https://github.com/bitnami/charts/commit/9e950c9657c317bcfed238779de2662fe9b2100e)) +* Enable persistence by default ([6192599](https://github.com/bitnami/charts/commit/6192599c757bdc834214fa71e80063d2925fc267)) +* Improve notes to access deployed services ([0071eb5](https://github.com/bitnami/charts/commit/0071eb5545f7774eeab9ea335c660df644ad3e4c)) +* Minor improvements ([a059522](https://github.com/bitnami/charts/commit/a05952290a8507b9c02b9087c8e7cbea3bfbab30)) +* Update README.md ([11705de](https://github.com/bitnami/charts/commit/11705de6d4ecf718090bfd3e435c7c529fe4b3c8)) + +## 0.0.1 (2018-06-27) + +* Firs version of etcd chart ([cf5869b](https://github.com/bitnami/charts/commit/cf5869b1651d4abc2be58b42f2a6cb4957cdae40)) diff --git a/addons/etcd/3.6/chart/etcd-3.6/Chart.yaml b/addons/etcd/3.6/chart/etcd-3.6/Chart.yaml new file mode 100644 index 00000000..cdd274a6 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/Chart.yaml @@ -0,0 +1,35 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +annotations: + images: | + - name: etcd + image: registry.drycc.cc/drycc-addons/etcd:3.6 + - name: base + image: registry.drycc.cc/drycc/base:trixie + licenses: Apache-2.0 + tanzuCategory: service +apiVersion: v2 +appVersion: 3.6.6 +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.4 +description: etcd is a distributed key-value store designed to securely store data + across a cluster. etcd is widely used in production on account of its reliability, + fault-tolerance and ease of use. +home: https://bitnami.com +icon: https://dyltqmyl993wv.cloudfront.net/assets/stacks/etcd/img/etcd-stack-220x234.png +keywords: +- etcd +- cluster +- database +- cache +- key-value +maintainers: +- name: Broadcom, Inc. All Rights Reserved. 
+ url: https://github.com/bitnami/charts +name: etcd +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/etcd +version: 12.0.20 diff --git a/addons/etcd/3.6/chart/etcd-3.6/README.md b/addons/etcd/3.6/chart/etcd-3.6/README.md new file mode 100644 index 00000000..79e16663 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/README.md @@ -0,0 +1,899 @@ + + +# Bitnami Secure Images Helm chart for Etcd + +etcd is a distributed key-value store designed to securely store data across a cluster. etcd is widely used in production on account of its reliability, fault-tolerance and ease of use. + +[Overview of Etcd](https://etcd.io/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/etcd +``` + +## Why use Bitnami Secure Images? + +Those are hardened, minimal CVE images built and maintained by Bitnami. Bitnami Secure Images are based on the cloud-optimized, security-hardened enterprise [OS Photon Linux](https://vmware.github.io/photon/). Why choose BSI images? + +- Hardened secure images of popular open source software with Near-Zero Vulnerabilities +- Vulnerability Triage & Prioritization with VEX Statements, KEV and EPSS Scores +- Compliance focus with FIPS, STIG, and air-gap options, including secure bill of materials (SBOM) +- Software supply chain provenance attestation through in-toto +- First class support for the internet’s favorite Helm charts + +Each image comes with valuable security metadata. You can view the metadata in [our public catalog here](https://app-catalog.vmware.com/bitnami/apps). Note: Some data is only available with [commercial subscriptions to BSI](https://bitnami.com/). 
+ +![Alt text](https://github.com/bitnami/containers/blob/main/BSI%20UI%201.png?raw=true "Application details") +![Alt text](https://github.com/bitnami/containers/blob/main/BSI%20UI%202.png?raw=true "Packaging report") + +If you are looking for our previous generation of images based on Debian Linux, please see the [Bitnami Legacy registry](https://hub.docker.com/u/bitnamilegacy). + +## Introduction + +This chart bootstraps a [etcd](https://github.com/bitnami/containers/tree/main/bitnami/etcd) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/etcd +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +These commands deploy etcd on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Configuration and installation details + +### Resource requests and limits + +Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. 
Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Prometheus metrics + +This chart can be integrated with Prometheus by setting `metrics.enabled` to true. This will expose the etcd native Prometheus port in the container and service (if `metrics.useSeparateEndpoint=true`). It will all have the necessary annotations to be automatically scraped by Prometheus. + +#### Prometheus requirements + +It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster. + +#### Integration with Prometheus Operator + +The chart can deploy `PodMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `*.metrics.podMonitor.enabled=true`. 
Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error: + +```text +no matches for kind "PodMonitor" in version "monitoring.coreos.com/v1" +``` + +Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) for having the necessary CRDs and the Prometheus Operator. + +### Update credentials + +Bitnami charts configure credentials at first boot. Any further change in the secrets or credentials require manual intervention. Follow these instructions: + +- Update the user password following [the upstream documentation](https://etcd.io/docs/latest/op-guide/authentication/) +- Update the password secret with the new values (replace the SECRET_NAME and PASSWORD placeholders) + +```shell +kubectl create secret generic SECRET_NAME --from-literal=etcd-root-password=PASSWORD --dry-run -o yaml | kubectl apply -f - +``` + +### Cluster configuration + +The Bitnami etcd chart can be used to bootstrap an etcd cluster, easy to scale and with available features to implement disaster recovery. It uses static discovery configured via environment variables to bootstrap the etcd cluster. Based on the number of initial replicas, and using the A records added to the DNS configuration by the headless service, the chart can calculate every advertised peer URL. + +The chart makes use of some extra elements offered by Kubernetes to ensure the bootstrapping is successful: + +- It sets a "Parallel" Pod Management Policy. This is critical, since all the etcd replicas should be created simultaneously to guarantee they can find each other. +- It records "not ready" pods in the DNS, so etcd replicas are reachable using their associated FQDN before they're actually ready. 
+ +Learn more about [etcd discovery](https://etcd.io/docs/current/op-guide/clustering/#discovery), [Pod Management Policies](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies) and [recording "not ready" pods](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-hostname-and-subdomain-fields). + +Here is an example of the environment configuration bootstrapping an etcd cluster with 3 replicas: + +| Member | Variable | Value | +|---------|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 0 | ETCD_NAME | etcd-0 | +| 0 | ETCD_INITIAL_ADVERTISE_PEER_URLS | | +|---------|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 1 | ETCD_NAME | etcd-1 | +| 1 | ETCD_INITIAL_ADVERTISE_PEER_URLS | | +|---------|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 2 | ETCD_NAME | etcd-2 | +| 2 | ETCD_INITIAL_ADVERTISE_PEER_URLS | | +|---------|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| * | ETCD_INITIAL_CLUSTER_TOKEN | etcd-cluster-k8s | +| * | ETCD_INITIAL_CLUSTER | etcd-0=,etcd-1=,etcd-2= | + +The probes (readiness & liveness) are delayed 60 seconds by default, to give the etcd replicas time to start and find each other. 
After that period, the *etcdctl endpoint health* command is used to periodically perform health checks on every replica. + +#### Scalability + +The Bitnami etcd chart uses etcd reconfiguration operations to add/remove members of the cluster during scaling. + +When scaling down, a "pre-stop" lifecycle hook is used to ensure that the `etcdctl member remove` command is executed. The hook stores the output of this command in the persistent volume attached to the etcd pod. This hook is also executed when the pod is manually removed using the `kubectl delete pod` command or rescheduled by Kubernetes for any reason. This implies that the cluster can be scaled up/down without human intervention. + +Here is an example to explain how this works: + +1. An etcd cluster with three members running on a three-nodes Kubernetes cluster is bootstrapped. +2. After a few days, the cluster administrator decides to upgrade the kernel on one of the cluster nodes. To do so, the administrator drains the node. Pods running on that node are rescheduled to a different one. +3. During the pod eviction process, the "pre-stop" hook removes the etcd member from the cluster. Thus, the etcd cluster is scaled down to only two members. +4. Once the pod is scheduled on another node and initialized, the etcd member is added again to the cluster using the *etcdctl member add* command. Thus, the etcd cluster is scaled up to three replicas. + +If, for whatever reason, the "pre-stop" hook fails at removing the member, the initialization logic is able to detect that something went wrong by checking the `etcdctl member remove` command output that was stored in the persistent volume. It then uses the `etcdctl member update` command to add back the member. In this case, the cluster isn't automatically scaled down/up while the pod is recovered. 
Therefore, when other members attempt to connect to the pod, it may cause warnings or errors like the one below: + +```text +E | rafthttp: failed to dial XXXXXXXX on stream Message (peer XXXXXXXX failed to find local node YYYYYYYYY) +I | rafthttp: peer XXXXXXXX became inactive (message send to peer failed) +W | rafthttp: health check for peer XXXXXXXX could not connect: dial tcp A.B.C.D:2380: i/o timeout +``` + +Learn more about [etcd runtime configuration](https://etcd.io/docs/current/op-guide/runtime-configuration/) and how to safely [drain a Kubernetes node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/). + +#### Cluster updates + +When updating the etcd StatefulSet (such as when upgrading the chart version via the *helm upgrade* command), every pod must be replaced following the StatefulSet update strategy. + +The chart uses a "RollingUpdate" strategy by default and with default Kubernetes values. In other words, it updates each Pod, one at a time, in the same order as Pod termination (from the largest ordinal to the smallest). It will wait until an updated Pod is "Running" and "Ready" prior to updating its predecessor. + +Learn more about [StatefulSet update strategies](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies). + +#### Disaster recovery + +If, for whatever reason, (N-1)/2 members of the cluster fail and the "pre-stop" hooks also fail at removing them from the cluster, the cluster disastrously fails, irrevocably losing quorum. Once quorum is lost, the cluster cannot reach consensus and therefore cannot continue accepting updates. Under this circumstance, the only possible solution is usually to restore the cluster from a snapshot. + +> IMPORTANT: All members should restore using the same snapshot. + +The Bitnami etcd chart solves this problem by optionally offering a Kubernetes cron job that periodically snapshots the keyspace and stores it in a RWX volume. 
In case the cluster disastrously fails, the pods will automatically try to restore it using the last available snapshot. + +[Learn how to enable this disaster recovery feature](#enable-disaster-recovery-features). + +The chart also sets by default a "soft" Pod AntiAffinity to reduce the risk of the cluster failing disastrously. + +Learn more about [etcd recovery](https://etcd.io/docs/current/op-guide/recovery), [Kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) and [pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) + +### Enable security for etcd + +The etcd chart can be configured with Role-based access control and TLS encryption to improve its security. + +#### Configure RBAC + +In order to enable Role-Based Access Control for etcd, set the following parameters: + +```text +auth.rbac.create=true +auth.rbac.rootPassword=ETCD_ROOT_PASSWORD +``` + +These parameters create a `root` user with an associated `root` role with access to everything. The remaining users will use the `guest` role and won't have permissions to do anything. + +#### Configure TLS for server-to-server communications + +In order to enable secure transport between peer nodes deploy the helm chart with these options: + +```text +auth.peer.secureTransport=true +auth.peer.useAutoTLS=true +``` + +#### Configure certificates for client communication + +In order to enable secure transport between client and server, create a secret containing the certificate and key files and the CA used to sign the client certificates. 
In this case, create the secret and then deploy the chart with these options: + +```text +auth.client.secureTransport=true +auth.client.enableAuthentication=true +auth.client.existingSecret=etcd-client-certs +``` + +Learn more about the [etcd security model](https://etcd.io/) and how to [generate self-signed certificates for etcd](https://coreos.com/os/docs/latest/generate-self-signed-certificates.html). + +### Enable disaster recovery features + +The Bitnami etcd Helm chart supports automatic disaster recovery by periodically snapshotting the keyspace. If the cluster permanently loses more than (N-1)/2 members, it tries to recover the cluster from a previous snapshot. + +Enable this feature with the following parameters: + +```text +persistence.enabled=true +disasterRecovery.enabled=true +disasterRecovery.pvc.size=2Gi +disasterRecovery.pvc.storageClassName=nfs +``` + +If the `startFromSnapshot.*` parameters are used at the same time as the `disasterRecovery.*` parameters, the PVC provided via the `startFromSnapshot.existingClaim` parameter will be used to store the periodical snapshots. + +> NOTE: The disaster recovery feature requires volumes with ReadWriteMany access mode. + +### Backup and restore + +Two different approaches are available to back up and restore this Helm Chart: + +- Back up the data from the source deployment and restore it in a new deployment using etcd's built-in backup/restore tools. +- Back up the persistent volumes from the source deployment and attach them to a new deployment using Velero, a Kubernetes backup/restore tool. + +#### Method 1: Backup and restore data using etcd's built-in tools + +This method involves the following steps: + +- Use the *etcdctl* tool to create a snapshot of the data in the source cluster. 
+- Make the snapshot available in a Kubernetes PersistentVolumeClaim (PVC) that supports ReadWriteMany access (for example, a PVC created with the NFS storage class) +- Restore the data snapshot in a new cluster using the Bitnami etcd Helm chart's *startFromSnapshot.existingClaim* and *startFromSnapshot.snapshotFilename* parameters to define the source PVC and source filename for the snapshot. + +> NOTE: Under this approach, it is important to create the new deployment on the destination cluster using the same credentials as the original deployment on the source cluster. + +#### Method 2: Back up and restore persistent data volumes + +This method involves copying the persistent data volumes for the etcd nodes and reusing them in a new deployment with [Velero](https://velero.io/), an open source Kubernetes backup/restore tool. This method is only suitable when: + +- The Kubernetes provider is [supported by Velero](https://velero.io/docs/latest/supported-providers/). +- Both clusters are on the same Kubernetes provider, as this is a requirement of [Velero's native support for migrating persistent volumes](https://velero.io/docs/latest/migration-case/). +- The restored deployment on the destination cluster will have the same name, namespace, topology and credentials as the original deployment on the source cluster. + +This method involves the following steps: + +- Install Velero on the source and destination clusters. +- Use Velero to back up the PersistentVolumes (PVs) used by the etcd deployment on the source cluster. +- Use Velero to restore the backed-up PVs on the destination cluster. +- Create a new etcd deployment on the destination cluster with the same deployment name, credentials and other parameters as the original. This new deployment will use the restored PVs and hence the original data. + +### Exposing etcd metrics + +The metrics exposed by etcd can be exposed to be scraped by Prometheus. 
Metrics can be scraped from within the cluster using any of the following approaches: + +- Adding the required annotations for Prometheus to discover the metrics endpoints, as in the example below: + +```yaml +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics/cluster" + prometheus.io/port: "9000" +``` + +- Creating a ServiceMonitor or PodMonitor entry (when the Prometheus Operator is available in the cluster) +- Using something similar to the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +### Using custom configuration + +In order to use custom configuration parameters, two options are available: + +- Using environment variables: etcd allows setting environment variables that map to configuration settings. In order to set extra environment variables, you can use the `extraEnvVars` property. Alternatively, you can use a ConfigMap or a Secret with the environment variables using the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +```yaml +extraEnvVars: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: "0" + - name: ETCD_HEARTBEAT_INTERVAL + value: "150" +``` + +- Using a custom `etcd.conf.yml`: The etcd chart allows mounting a custom `etcd.conf.yml` file as ConfigMap. In order to so, you can use the `configuration` property. Alternatively, you can use an existing ConfigMap using the `existingConfigmap` parameter. + +### Auto Compaction + +Since etcd keeps an exact history of its keyspace, this history should be periodically compacted to avoid performance degradation and eventual storage space exhaustion. Compacting the keyspace history drops all information about keys superseded prior to a given keyspace revision. The space used by these keys then becomes available for additional writes to the keyspace. 
+ +`autoCompactionMode`, by default periodic. Valid values: "periodic", "revision". + +- 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. "5m"). +- 'revision' for revision number based retention. +`autoCompactionRetention` for mvcc key value store in hour, by default 0, means disabled. + +You can enable auto compaction by using following parameters: + +```console +autoCompactionMode=periodic +autoCompactionRetention=10m +``` + +### Sidecars and Init Containers + +If you have a need for additional containers to run within the same pod as the etcd app (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such a ConfigMap containing your app's configuration or some extra deployment with a micro service used by your app. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
+ +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +## Persistence + +The [Bitnami etcd](https://github.com/bitnami/containers/tree/main/bitnami/etcd) image stores the etcd data at the `/bitnami/etcd` path of the container. Persistent Volume Claims are used to keep the data across statefulsets. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.security.allowInsecureImages` | Allows skipping image verification | `false` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace template | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `usePasswordFiles` | Mount credentials as files instead of using environment variables | `true` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + +### etcd parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `image.registry` | etcd image registry | `REGISTRY_NAME` | +| `image.repository` | etcd image name | `REPOSITORY_NAME/etcd` | +| `image.digest` | etcd image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | etcd image pull policy | `IfNotPresent` | +| `image.pullSecrets` | etcd image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | +| `auth.rbac.create` | Switch to enable RBAC authentication | `true` | +| `auth.rbac.allowNoneAuthentication` | Allow to use etcd without configuring RBAC authentication | `true` | +| `auth.rbac.rootPassword` | Root user password. The root user is always `root` | `""` | +| `auth.rbac.existingSecret` | Name of the existing secret containing credentials for the root user | `""` | +| `auth.rbac.existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `""` | +| `auth.token.enabled` | Enables token authentication | `true` | +| `auth.token.type` | Authentication token type. Allowed values: 'simple' or 'jwt' | `jwt` | +| `auth.token.privateKey.filename` | Name of the file containing the private key for signing the JWT token | `jwt-token.pem` | +| `auth.token.privateKey.existingSecret` | Name of the existing secret containing the private key for signing the JWT token | `""` | +| `auth.token.signMethod` | JWT token sign method | `RS256` | +| `auth.token.ttl` | JWT token TTL | `10m` | +| `auth.client.secureTransport` | Switch to encrypt client-to-server communications using TLS certificates | `false` | +| `auth.client.useAutoTLS` | Switch to automatically create the TLS certificates | `false` | +| `auth.client.existingSecret` | Name of the existing secret containing the TLS certificates for client-to-server communications | `""` | +| `auth.client.enableAuthentication` | Switch to enable host authentication using TLS certificates. 
Requires existing secret | `false` | +| `auth.client.certFilename` | Name of the file containing the client certificate | `cert.pem` | +| `auth.client.certKeyFilename` | Name of the file containing the client certificate private key | `key.pem` | +| `auth.client.caFilename` | Name of the file containing the client CA certificate | `""` | +| `auth.peer.secureTransport` | Switch to encrypt server-to-server communications using TLS certificates | `false` | +| `auth.peer.useAutoTLS` | Switch to automatically create the TLS certificates | `false` | +| `auth.peer.existingSecret` | Name of the existing secret containing the TLS certificates for server-to-server communications | `""` | +| `auth.peer.enableAuthentication` | Switch to enable host authentication using TLS certificates. Requires existing secret | `false` | +| `auth.peer.certFilename` | Name of the file containing the peer certificate | `cert.pem` | +| `auth.peer.certKeyFilename` | Name of the file containing the peer certificate private key | `key.pem` | +| `auth.peer.caFilename` | Name of the file containing the peer CA certificate | `""` | +| `autoCompactionMode` | Auto compaction mode, by default periodic. Valid values: "periodic", "revision". | `""` | +| `autoCompactionRetention` | Auto compaction retention for mvcc key value store in hour, by default 0, means disabled | `""` | +| `initialClusterToken` | Initial cluster token. Can be used to protect etcd from cross-cluster-interaction, which might corrupt the clusters. | `etcd-cluster-k8s` | +| `logLevel` | Sets the log level for the etcd process. Allowed values: 'debug', 'info', 'warn', 'error', 'panic', 'fatal' | `info` | +| `maxProcs` | Limits the number of operating system threads that can execute user-level | `""` | +| `configuration` | etcd configuration. 
Specify content for etcd.conf.yml | `""` | +| `existingConfigmap` | Existing ConfigMap with etcd configuration | `""` | +| `extraEnvVars` | Extra environment variables to be set on etcd container | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` | +| `command` | Default container command (useful when using custom images) | `[]` | +| `args` | Default container args (useful when using custom images) | `[]` | + +### etcd statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `replicaCount` | Number of etcd replicas to deploy | `1` | +| `updateStrategy.type` | Update strategy type, can be set to RollingUpdate or OnDelete. 
| `RollingUpdate` | +| `podManagementPolicy` | Pod management policy for the etcd statefulset | `Parallel` | +| `automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `hostAliases` | etcd pod host aliases | `[]` | +| `lifecycleHooks` | Override default etcd container hooks | `{}` | +| `containerPorts.client` | Client port to expose at container level | `2379` | +| `containerPorts.peer` | Peer port to expose at container level | `2380` | +| `containerPorts.metrics` | Metrics port to expose at container level when metrics.useSeparateEndpoint is true | `9090` | +| `podSecurityContext.enabled` | Enabled etcd pods' Security Context | `true` | +| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `podSecurityContext.fsGroup` | Set etcd pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled etcd containers' Security Context | `true` | +| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `containerSecurityContext.runAsUser` | Set etcd containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsGroup` | Set etcd containers' Security Context runAsGroup | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set Controller container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set primary container's Security Context privileged | `false` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set primary container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| 
`containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` | +| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `0` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| 
`customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for etcd pods | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for etcd container(s) | `[]` | +| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volumeClaimTemplates for etcd container(s) | `[]` | +| `initContainers` | Add additional init containers to the etcd pods | `[]` | +| `sidecars` | Add additional sidecar containers to the etcd pods | `[]` | +| `podAnnotations` | Annotations for etcd pods | `{}` | +| `podLabels` | Extra labels for etcd pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `priorityClassName` | Name of the priority class to be used by etcd pods | `""` | +| `runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `shareProcessNamespace` | Enable shared process namespace in a pod. 
| `false` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` | +| `persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | + +### Traffic exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.enabled` | create second service if equal true | `true` | +| `service.clusterIP` | Kubernetes service Cluster IP | `""` | +| `service.ports.client` | etcd client port | `2379` | +| `service.ports.peer` | etcd peer port | `2380` | +| `service.ports.metrics` | etcd metrics port when metrics.useSeparateEndpoint is true | `9090` | +| `service.nodePorts.client` | Specify the nodePort client value for the LoadBalancer and NodePort service types. | `""` | +| `service.nodePorts.peer` | Specify the nodePort peer value for the LoadBalancer and NodePort service types. | `""` | +| `service.nodePorts.metrics` | Specify the nodePort metrics value for the LoadBalancer and NodePort service types. The metrics port is only exposed when metrics.useSeparateEndpoint is true. | `""` | +| `service.clientPortNameOverride` | etcd client port name override | `""` | +| `service.peerPortNameOverride` | etcd peer port name override | `""` | +| `service.metricsPortNameOverride` | etcd metrics port name override. The metrics port is only exposed when metrics.useSeparateEndpoint is true. 
| `""` | +| `service.loadBalancerIP` | loadBalancerIP for the etcd service (optional, cloud specific) | `""` | +| `service.loadBalancerClass` | loadBalancerClass for the etcd service (optional, cloud specific) | `""` | +| `service.loadBalancerSourceRanges` | Load Balancer source ranges | `[]` | +| `service.externalIPs` | External IPs | `[]` | +| `service.externalTrafficPolicy` | %%MAIN_CONTAINER_NAME%% service external traffic policy | `Cluster` | +| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `service.annotations` | Additional annotations for the etcd service | `{}` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | + +### Persistence parameters + +| Name | Description | Value | +| -------------------------- | --------------------------------------------------------------- | ------------------- | +| `persistence.enabled` | If true, use a Persistent Volume Claim. If false, use emptyDir. 
| `true` | +| `persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for etcd data volume | `8Gi` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `nano` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | + +### Network Policy parameters + +| Name | Description | Value | +| --------------------------------------- | --------------------------------------------------------------- | ------ | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ------------ | +| `metrics.enabled` | Expose etcd metrics | `false` | +| `metrics.useSeparateEndpoint` | Use a separate endpoint for exposing metrics | `false` | +| `metrics.podAnnotations` | Annotations for the Prometheus metrics on etcd pods | `{}` | +| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.podMonitor.namespace` | Namespace in which Prometheus is running | `monitoring` | +| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` | +| `metrics.podMonitor.additionalLabels` | Additional 
labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | +| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` | +| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` | +| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` | +| `metrics.prometheusRule.enabled` | Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | Prometheus Rule definitions | `[]` | + +### Snapshotting parameters + +| Name | Description | Value | +| ---------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `startFromSnapshot.enabled` | Initialize new cluster recovering an existing snapshot | `false` | +| `startFromSnapshot.existingClaim` | Existing PVC containing the etcd snapshot | `""` | +| `startFromSnapshot.snapshotFilename` | Snapshot filename | `""` | +| `disasterRecovery.enabled` | Enable auto disaster recovery by periodically snapshotting the keyspace | `false` | +| `disasterRecovery.cronjob.schedule` | Schedule in Cron format to save snapshots | `*/30 * * * *` | +| `disasterRecovery.cronjob.historyLimit` | Number of successful finished jobs to retain | `1` | +| `disasterRecovery.cronjob.snapshotHistoryLimit` | Number of etcd snapshots to retain, tagged by date | `1` | +| 
`disasterRecovery.cronjob.snapshotsDir` | Directory to store snapshots | `/snapshots` | +| `disasterRecovery.cronjob.podAnnotations` | Pod annotations for cronjob pods | `{}` | +| `disasterRecovery.cronjob.podSecurityContext.enabled` | Enable security context for Snapshotter pods | `true` | +| `disasterRecovery.cronjob.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `disasterRecovery.cronjob.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `disasterRecovery.cronjob.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `disasterRecovery.cronjob.podSecurityContext.fsGroup` | Group ID for the Snapshotter filesystem | `1001` | +| `disasterRecovery.cronjob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `disasterRecovery.cronjob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `disasterRecovery.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `disasterRecovery.cronjob.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `disasterRecovery.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `disasterRecovery.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `disasterRecovery.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `disasterRecovery.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `disasterRecovery.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `disasterRecovery.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context 
seccomp profile | `RuntimeDefault` | +| `disasterRecovery.cronjob.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if disasterRecovery.cronjob.resources is set (disasterRecovery.cronjob.resources is recommended for production). | `nano` | +| `disasterRecovery.cronjob.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `disasterRecovery.cronjob.nodeSelector` | Node labels for cronjob pods assignment | `{}` | +| `disasterRecovery.cronjob.tolerations` | Tolerations for cronjob pods assignment | `[]` | +| `disasterRecovery.cronjob.podLabels` | Labels that will be added to pods created by cronjob | `{}` | +| `disasterRecovery.cronjob.serviceAccountName` | Specifies the service account to use for disaster recovery cronjob | `""` | +| `disasterRecovery.cronjob.command` | Override default snapshot container command (useful when you want to customize the snapshot logic) | `[]` | +| `disasterRecovery.pvc.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `disasterRecovery.pvc.size` | PVC Storage Request | `2Gi` | +| `disasterRecovery.pvc.storageClassName` | Storage Class for snapshots volume | `nfs` | +| `disasterRecovery.pvc.subPath` | Path within the volume from which to mount | `""` | + +### Service account parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable/disable service account creation | `true` | +| `serviceAccount.name` | Name of the service account to create or use | `""` | +| `serviceAccount.automountServiceAccountToken` | Enable/disable auto mounting of service account token | `false` | +| `serviceAccount.annotations` | Additional annotations to be included on the service account | `{}` | 
+| `serviceAccount.labels` | Additional labels to be included on the service account | `{}` | + +### etcd "pre-upgrade" K8s Job parameters + +| Name | Description | Value | +| ----------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `preUpgradeJob.enabled` | Enable running a pre-upgrade job on Helm upgrades that removes obsolete members | `true` | +| `preUpgradeJob.annotations` | Add annotations to the etcd "pre-upgrade" job | `{}` | +| `preUpgradeJob.podLabels` | Additional pod labels for etcd "pre-upgrade" job | `{}` | +| `preUpgradeJob.podAnnotations` | Additional pod annotations for etcd "pre-upgrade" job | `{}` | +| `preUpgradeJob.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `preUpgradeJob.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `preUpgradeJob.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `preUpgradeJob.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `preUpgradeJob.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `preUpgradeJob.affinity` | Affinity for pod assignment | `{}` | +| `preUpgradeJob.nodeSelector` | Node labels for pod assignment | `{}` | +| `preUpgradeJob.tolerations` | Tolerations for pod assignment | `[]` | +| `preUpgradeJob.containerSecurityContext.enabled` | Enabled "pre-upgrade" job's containers' Security Context | `true` | +| `preUpgradeJob.containerSecurityContext.seLinuxOptions` | Set SELinux options in "pre-upgrade" job's containers | `{}` | +| `preUpgradeJob.containerSecurityContext.runAsUser` | Set runAsUser in "pre-upgrade" job's containers' Security Context | `1001` | +| `preUpgradeJob.containerSecurityContext.runAsGroup` | Set runAsGroup in "pre-upgrade" job's containers' Security Context | `1001` | +| `preUpgradeJob.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "pre-upgrade" job's containers' Security Context | `true` | +| `preUpgradeJob.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "pre-upgrade" job's containers' Security Context | `true` | +| `preUpgradeJob.containerSecurityContext.privileged` | Set privileged in "pre-upgrade" job's containers' Security Context | `false` | +| `preUpgradeJob.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "pre-upgrade" job's containers' Security Context | `false` | +| `preUpgradeJob.containerSecurityContext.capabilities.add` | List of capabilities to be added in "pre-upgrade" job's containers | `[]` | +| `preUpgradeJob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "pre-upgrade" job's containers | `["ALL"]` | +| `preUpgradeJob.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "pre-upgrade" job's containers | `RuntimeDefault` | +| `preUpgradeJob.podSecurityContext.enabled` | Enabled "pre-upgrade" job's pods' Security Context | `true` | +| `preUpgradeJob.podSecurityContext.fsGroupChangePolicy` | Set fsGroupChangePolicy in "pre-upgrade" job's pods' Security 
Context | `Always` | +| `preUpgradeJob.podSecurityContext.sysctls` | List of sysctls to allow in "pre-upgrade" job's pods' Security Context | `[]` | +| `preUpgradeJob.podSecurityContext.supplementalGroups` | List of supplemental groups to add to "pre-upgrade" job's pods' Security Context | `[]` | +| `preUpgradeJob.podSecurityContext.fsGroup` | Set fsGroup in "pre-upgrade" job's pods' Security Context | `1001` | +| `preUpgradeJob.resourcesPreset` | Set etcd "pre-upgrade" job's container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if preUpgradeJob.resources is set (preUpgradeJob.resources is recommended for production). | `micro` | +| `preUpgradeJob.resources` | Set etcd "pre-upgrade" job's container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `preUpgradeJob.startDelay` | Optional delay before starting the pre-upgrade hook (in seconds). | `""` | + +### Defragmentation parameters + +| Name | Description | Value | +| ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------ | ---------------- | +| `defrag.enabled` | Enable automatic defragmentation. This is most effective when paired with auto compaction: consider setting "autoCompactionRetention > 0". 
| `false` | +| `defrag.cronjob.startingDeadlineSeconds` | Number of seconds representing the deadline for starting the job if it misses scheduled time for any reason | `""` | +| `defrag.cronjob.schedule` | Schedule in Cron format to defrag (daily at midnight by default) | `0 0 * * *` | +| `defrag.cronjob.concurrencyPolicy` | Set the cronjob parameter concurrencyPolicy | `Forbid` | +| `defrag.cronjob.suspend` | Boolean that indicates if the controller must suspend subsequent executions (not applied to already started executions) | `false` | +| `defrag.cronjob.successfulJobsHistoryLimit` | Number of successful finished jobs to retain | `1` | +| `defrag.cronjob.failedJobsHistoryLimit` | Number of failed finished jobs to retain | `1` | +| `defrag.cronjob.labels` | Additional labels to be added to the Defrag cronjob | `{}` | +| `defrag.cronjob.annotations` | Annotations to be added to the Defrag cronjob | `{}` | +| `defrag.cronjob.activeDeadlineSeconds` | Number of seconds relative to the startTime that the job may be continuously active before the system tries to terminate it | `""` | +| `defrag.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` | +| `defrag.cronjob.podLabels` | Labels that will be added to pods created by Defrag cronjob | `{}` | +| `defrag.cronjob.podAnnotations` | Pod annotations for Defrag cronjob pods | `{}` | +| `defrag.cronjob.podSecurityContext.enabled` | Enable security context for Defrag pods | `true` | +| `defrag.cronjob.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `defrag.cronjob.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `defrag.cronjob.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `defrag.cronjob.podSecurityContext.fsGroup` | Group ID for the Defrag filesystem | `1001` | +| `defrag.cronjob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| 
`defrag.cronjob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `defrag.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `defrag.cronjob.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `defrag.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `defrag.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `defrag.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `defrag.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `defrag.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `defrag.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `defrag.cronjob.nodeSelector` | Node labels for pod assignment in Defrag cronjob | `{}` | +| `defrag.cronjob.tolerations` | Tolerations for pod assignment in Defrag cronjob | `[]` | +| `defrag.cronjob.serviceAccountName` | Specifies the service account to use for Defrag cronjob | `""` | +| `defrag.cronjob.command` | Override default container command for defragmentation (useful when using custom images) | `[]` | +| `defrag.cronjob.args` | Override default container args (useful when using custom images) | `[]` | +| `defrag.cronjob.resourcesPreset` | Set container resources according to one common preset | `nano` | +| `defrag.cronjob.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `defrag.cronjob.extraEnvVars` | Extra environment variables to be set on defrag cronjob container | `[]` | +| `defrag.cronjob.extraEnvVarsCM` | Name of existing ConfigMap 
containing extra env vars | `""` | +| `defrag.cronjob.extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` | + +### Other parameters + +| Name | Description | Value | +| -------------------- | -------------------------------------------------------------- | ------ | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `51%` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.rbac.rootPassword=secretpassword oci://REGISTRY_NAME/REPOSITORY_NAME/etcd +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the etcd `root` account password to `secretpassword`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/etcd +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. 
For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/etcd/values.yaml) + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 11.0.0 + +This version introduces the following breaking changes: + +- Remove `initialClusterState` which was unreliable at detecting cluster state. From now on, each node will contact other members to determine cluster state. If no members are available and the data dir is empty, then it bootstraps a new cluster. +- Remove `removeMemberOnContainerTermination` which was unreliable at removing stale members during replica count updates. Instead, a pre-upgrade hook is added to check and remove stale members. +- Remove support for manual scaling with `kubectl` or autoscaler. Upgrading of any kind including increasing replica count must be done with `helm upgrade` exclusively. CD automation tools that respect Helm hooks such as ArgoCD can also be used. + +### To 10.7.0 + +This version introduces image verification for security purposes. To disable it, set `global.security.allowInsecureImages` to `true`. More details at [GitHub issue](https://github.com/bitnami/charts/issues/30850). + +### To 10.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. 
+
+This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones.
+
+### To 9.0.0
+
+This version adds a new label `app.kubernetes.io/component=etcd` to the StatefulSet and pods. Due to this change, the StatefulSet will be replaced (as it's not possible to add additional `spec.selector.matchLabels` to an existing StatefulSet) and the pods will be recreated. To upgrade to this version from a previous version, you need to run the following steps:
+
+1. Add new label to your pods
+
+   ```console
+   kubectl label pod my-release-0 app.kubernetes.io/component=etcd
+   # Repeat for all etcd pods, based on configured .replicaCount (excluding the etcd snapshotter pod, if .disasterRecovery.enabled is set to true)
+   ```
+
+2. Remove the StatefulSet keeping the pods:
+
+   ```console
+   kubectl delete statefulset my-release --cascade=orphan
+   ```
+
+3. Upgrade your cluster:
+
+   ```console
+   helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/etcd --set auth.rbac.rootPassword=$ETCD_ROOT_PASSWORD
+   ```
+
+   > Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
+
+### To 8.0.0
+
+This version reverts the change in the previous major bump ([7.0.0](https://github.com/bitnami/charts/tree/main/bitnami/etcd#to-700)). Now the default `etcd` branch is `3.5` again once confirmed by the [etcd developers](https://github.com/etcd-io/etcd/tree/main/CHANGELOG#production-recommendation) that this version is production-ready once solved the data corruption issue.
+
+### To 7.0.0
+
+This version changes the default `etcd` branch to `3.4` as suggested by [etcd developers](https://github.com/etcd-io/etcd/tree/main/CHANGELOG#production-recommendation).
In order to migrate the data follow the official etcd instructions. + +### To 6.0.0 + +This version introduces several features and performance improvements: + +- The statefulset can now be scaled using `kubectl scale` command. Using `helm upgrade` to recalculate available endpoints is no longer needed. +- The scripts used for bootstrapping, runtime reconfiguration, and disaster recovery have been refactored and moved to the etcd container with two purposes: removing technical debt & improving the stability. +- Several parameters were reorganized to simplify the structure and follow the same standard used on other Bitnami charts: + - `etcd.initialClusterState` is renamed to `initialClusterState`. + - `statefulset.replicaCount` is renamed to `replicaCount`. + - `statefulset.podManagementPolicy` is renamed to `podManagementPolicy`. + - `statefulset.updateStrategy` and `statefulset.rollingUpdatePartition` are merged into `updateStrategy`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - `configFileConfigMap` is deprecated in favor of `configuration` and `existingConfigmap`. + - `envVarsConfigMap` is deprecated in favor of `extraEnvVars`, `extraEnvVarsCM` and `extraEnvVarsSecret`. + - `allowNoneAuthentication` is renamed to `auth.rbac.allowNoneAuthentication`. +- New parameters/features were added: + - `extraDeploy` to deploy any extra desired object. + - `initContainers` and `sidecars` to define custom init containers and sidecars. + - `extraVolumes`, `extraVolumeMounts` and `extraVolumeClaimTemplates` to define custom volumes, mount points and volume claim templates. + - Probes can be now customized, and support to startup probes is added. + - LifecycleHooks can be customized using `lifecycleHooks` parameter. + - The default command/args can be customized using `command` and `args` parameters. +- Metrics integration with Prometheus Operator does no longer use a ServiceMonitor object, but a PodMonitor instead. 
+
+Consequences:
+
+- Backwards compatibility is not guaranteed unless you adapt your **values.yaml** according to the changes described above.
+
+### To 5.2.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 5.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+### To 4.4.14
+
+In this release we addressed a vulnerability that showed the `ETCD_ROOT_PASSWORD` environment variable in the application logs. Users are advised to update immediately. More information in [this issue](https://github.com/bitnami/charts/issues/1901).
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed. The following notable changes were included:
+
+- **etcdctl** uses v3 API.
+- Adds support for auto disaster recovery.
+- Labels are adapted to follow the Helm charts best practices.
+
+To upgrade from previous charts versions, create a snapshot of the keyspace and restore it in a new etcd cluster. Only v3 API data can be restored.
+You can use the command below to upgrade your chart by starting a new cluster using an existing snapshot, available in an existing PVC, to initialize the members: + +```console +helm install new-release oci://REGISTRY_NAME/REPOSITORY_NAME/etcd \ + --set statefulset.replicaCount=3 \ + --set persistence.enabled=true \ + --set persistence.size=8Gi \ + --set startFromSnapshot.enabled=true \ + --set startFromSnapshot.existingClaim=my-claim \ + --set startFromSnapshot.snapshotFilename=my-snapshot.db +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is etcd: + +```console +kubectl delete statefulset etcd --cascade=false +``` + +## License + +Copyright © 2023 Drycc Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/NOTES.txt b/addons/etcd/3.6/chart/etcd-3.6/templates/NOTES.txt new file mode 100644 index 00000000..02141168 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/NOTES.txt @@ -0,0 +1,121 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + + +{{- if and (eq .Values.service.type "LoadBalancer") .Values.auth.rbac.allowNoneAuthentication }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer", "auth.rbac.enabled=false" and + "auth.rbac.allowNoneAuthentication=true" you have most likely exposed the etcd + service externally without any authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "auth.rbac.enabled=true" + providing a valid password on "auth.rbac.rootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ include "common.names.namespace" . 
}} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/drycc/scripts/etcd/entrypoint.sh /opt/drycc/scripts/etcd/run.sh + +{{- else }} + +etcd can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} + +To create a pod that you can use as a etcd client run the following command: + + kubectl run {{ template "common.names.fullname" . }}-client --restart='Never' --image {{ template "etcd.image" . }}{{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} --env ROOT_PASSWORD=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ if .Values.auth.rbac.existingSecret }}{{ .Values.auth.rbac.existingSecret }}{{ else }}{{ template "common.names.fullname" . }}{{ end }} -o jsonpath="{{ if .Values.auth.rbac.existingSecret }}{.data.{{ .Values.auth.rbac.existingSecretPasswordKey }}}{{ else }}{.data.etcd-root-password}{{ end }}" | base64 -d){{- end }} --env ETCDCTL_ENDPOINTS="{{ template "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.ports.client }}" --namespace {{ include "common.names.namespace" . }} --command -- sleep infinity + +Then, you can set/get a key using the commands below: + + kubectl exec --namespace {{ include "common.names.namespace" . }} -it {{ template "common.names.fullname" . }}-client -- bash + {{- $etcdAuthOptions := include "etcd.authOptions" . }} + etcdctl {{ $etcdAuthOptions }} put /message Hello + etcdctl {{ $etcdAuthOptions }} get /message + +To connect to your etcd server from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . 
}} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) + echo "etcd URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "etcd URL: http://$SERVICE_IP:{{ .Values.service.ports.client }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.service.ports.client }} & + echo "etcd URL: http://127.0.0.1:{{ .Values.service.ports.client }}" + +{{- end }} +{{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} + + * As rbac is enabled you should add the flag `--user root:$ETCD_ROOT_PASSWORD` to the etcdctl commands. Use the command below to export the password: + + export ETCD_ROOT_PASSWORD=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ if .Values.auth.rbac.existingSecret }}{{ .Values.auth.rbac.existingSecret }}{{ else }}{{ template "common.names.fullname" . 
}}{{ end }} -o jsonpath="{{ if .Values.auth.rbac.existingSecret }}{.data.{{ .Values.auth.rbac.existingSecretPasswordKey }}}{{ else }}{.data.etcd-root-password}{{ end }}" | base64 -d) + +{{- end }} +{{- if .Values.auth.client.secureTransport }} +{{- if .Values.auth.client.useAutoTLS }} + + * As TLS is enabled you should add the flag `--cert-file /drycc/etcd/data/fixtures/client/cert.pem --key-file /drycc/etcd/data/fixtures/client/key.pem --insecure-skip-tls-verify` to the etcdctl commands. + +{{- else }} + + * As TLS is enabled you should add the flag `--cert-file /opt/drycc/etcd/certs/client/{{ .Values.auth.client.certFilename }} --key-file /opt/drycc/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}` to the etcdctl commands. + +{{- end }} + + * You should also export a proper etcdctl endpoint using the https schema. Eg. + + export ETCDCTL_ENDPOINTS=https://{{ template "common.names.fullname" . }}-0:{{ .Values.service.ports.client }} + +{{- end }} +{{- if .Values.auth.client.enableAuthentication }} + + * As TLS host authentication is enabled you should add the flag `--ca-file /opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}` to the etcdctl commands. + +{{- end }} +{{- $autoCompactionValue := (regexReplaceAll "[^0-9]" .Values.autoCompactionRetention "" | int) }} +{{- if and .Values.defrag.enabled (or (empty .Values.autoCompactionRetention) (eq $autoCompactionValue 0)) }} + + * Disk defragmentation in etcd is most effective when paired with key history auto compaction. Consider setting "autoCompactionRetention > 0". + +{{- end }} +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "etcd.validateValues" . 
}} +{{- include "common.warnings.resources" (dict "sections" (list "" "volumePermissions" "preUpgradeJob" "disasterRecovery.cronjob") "context" $) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.volumePermissions.image) "context" $) }} +{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.volumePermissions.image) "context" $) }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/_helpers.tpl b/addons/etcd/3.6/chart/etcd-3.6/templates/_helpers.tpl new file mode 100644 index 00000000..ce793012 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/_helpers.tpl @@ -0,0 +1,213 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper etcd image name +*/}} +{{- define "etcd.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "etcd.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "etcd.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper etcd peer protocol +*/}} +{{- define "etcd.peerProtocol" -}} +{{- if .Values.auth.peer.secureTransport -}} +{{- print "https" -}} +{{- else -}} +{{- print "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper etcd client protocol +*/}} +{{- define "etcd.clientProtocol" -}} +{{- if .Values.auth.client.secureTransport -}} +{{- print "https" -}} +{{- else -}} +{{- print "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper etcdctl 
authentication options +*/}} +{{- define "etcd.authOptions" -}} +{{- $rbacOption := "--user root:$ROOT_PASSWORD" -}} +{{- $certsOption := " --cert $ETCD_CERT_FILE --key $ETCD_KEY_FILE" -}} +{{- $autoCertsOption := " --cert /drycc/etcd/data/fixtures/client/cert.pem --key /drycc/etcd/data/fixtures/client/key.pem --insecure-skip-tls-verify" -}} +{{- $caOption := " --cacert $ETCD_TRUSTED_CA_FILE" -}} +{{- $insecureTlsOption := " --insecure-skip-tls-verify" -}} +{{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled -}} + {{- printf "%s" $rbacOption -}} +{{- end -}} +{{- if and .Values.auth.client.secureTransport .Values.auth.client.useAutoTLS -}} + {{- printf "%s" $autoCertsOption -}} +{{- else if and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS) -}} + {{- printf "%s" $certsOption -}} + {{- if or .Values.auth.client.enableAuthentication .Values.auth.client.caFilename -}} + {{- printf "%s" $caOption -}} + {{- else -}} + {{- printf "%s" $insecureTlsOption -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the etcd configuration configmap +*/}} +{{- define "etcd.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "etcd.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with etcd credentials +*/}} +{{- define "etcd.secretName" -}} + {{- if .Values.auth.rbac.existingSecret -}} + {{- printf "%s" .Values.auth.rbac.existingSecret | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the secret password key to be retrieved from etcd secret. 
+*/}} +{{- define "etcd.secretPasswordKey" -}} +{{- if and .Values.auth.rbac.existingSecret .Values.auth.rbac.existingSecretPasswordKey -}} +{{- printf "%s" .Values.auth.rbac.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "etcd-root-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for the etcd token private key +*/}} +{{- define "etcd.token.createSecret" -}} +{{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") (empty .Values.auth.token.privateKey.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with etcd token private key +*/}} +{{- define "etcd.token.secretName" -}} + {{- if .Values.auth.token.privateKey.existingSecret -}} + {{- printf "%s" .Values.auth.token.privateKey.existingSecret | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-jwt-token" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} + {{- end -}} +{{- end -}} + +{{/* +Return the proper Disaster Recovery PVC name +*/}} +{{- define "etcd.disasterRecovery.pvc.name" -}} +{{- if .Values.disasterRecovery.pvc.existingClaim -}} + {{- printf "%s" (tpl .Values.disasterRecovery.pvc.existingClaim $) | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.startFromSnapshot.existingClaim -}} + {{- printf "%s" (tpl .Values.startFromSnapshot.existingClaim $) | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- printf "%s-snapshotter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "etcd.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "common.names.fullname" .) .Values.serviceAccount.name | trunc 63 | trimSuffix "-" }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name | trunc 63 | trimSuffix "-" }} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. 
+*/}} +{{- define "etcd.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "etcd.validateValues.startFromSnapshot.existingClaim" .) -}} +{{- $messages := append $messages (include "etcd.validateValues.startFromSnapshot.snapshotFilename" .) -}} +{{- $messages := append $messages (include "etcd.validateValues.disasterRecovery" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of etcd - an existing claim must be provided when startFromSnapshot is enabled */}} +{{- define "etcd.validateValues.startFromSnapshot.existingClaim" -}} +{{- if and .Values.startFromSnapshot.enabled (not .Values.startFromSnapshot.existingClaim) (not .Values.disasterRecovery.enabled) -}} +etcd: startFromSnapshot.existingClaim + An existing claim must be provided when startFromSnapshot is enabled and disasterRecovery is disabled!! + Please provide it (--set startFromSnapshot.existingClaim="xxxx") +{{- end -}} +{{- end -}} + +{{/* Validate values of etcd - the snapshot filename must be provided when startFromSnapshot is enabled */}} +{{- define "etcd.validateValues.startFromSnapshot.snapshotFilename" -}} +{{- if and .Values.startFromSnapshot.enabled (not .Values.startFromSnapshot.snapshotFilename) (not .Values.disasterRecovery.enabled) -}} +etcd: startFromSnapshot.snapshotFilename + The snapshot filename must be provided when startFromSnapshot is enabled and disasterRecovery is disabled!! 
+ Please provide it (--set startFromSnapshot.snapshotFilename="xxxx") +{{- end -}} +{{- end -}} + +{{/* Validate values of etcd - persistence must be enabled when disasterRecovery is enabled */}} +{{- define "etcd.validateValues.disasterRecovery" -}} +{{- if and .Values.disasterRecovery.enabled (not .Values.persistence.enabled) -}} +etcd: disasterRecovery + Persistence must be enabled when disasterRecovery is enabled!! + Please enable persistence (--set persistence.enabled=true) +{{- end -}} +{{- end -}} + +{{- define "etcd.token.jwtToken" -}} +{{- if (include "etcd.token.createSecret" .) -}} +{{- $jwtToken := lookup "v1" "Secret" (include "common.names.namespace" .) (printf "%s-jwt-token" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" ) -}} +{{- if $jwtToken -}} +{{ index $jwtToken "data" "jwt-token.pem" | b64dec }} +{{- else -}} +{{ genPrivateKey "rsa" }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/configmap.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/configmap.yaml new file mode 100644 index 00000000..85886310 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "etcd.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + etcd.conf.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-defrag.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-defrag.yaml new file mode 100644 index 00000000..01611d00 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-defrag.yaml @@ -0,0 +1,169 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.defrag.enabled }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ printf "%s-defrag" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.defrag.cronjob.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.defrag.cronjob.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.defrag.cronjob.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.defrag.cronjob.startingDeadlineSeconds }} + startingDeadlineSeconds: {{ .Values.defrag.cronjob.startingDeadlineSeconds }} + {{- end }} + concurrencyPolicy: {{ .Values.defrag.cronjob.concurrencyPolicy }} + schedule: {{ .Values.defrag.cronjob.schedule | quote }} + suspend: {{ .Values.defrag.cronjob.suspend }} + successfulJobsHistoryLimit: {{ .Values.defrag.cronjob.successfulJobsHistoryLimit }} + failedJobsHistoryLimit: {{ .Values.defrag.cronjob.failedJobsHistoryLimit }} + jobTemplate: + spec: + template: + metadata: + {{- $mergedLabels := mergeOverwrite (dict) .Values.commonLabels .Values.defrag.cronjob.podLabels }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $mergedLabels "context" $ ) | nindent 12 }} + {{- if .Values.defrag.cronjob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.defrag.cronjob.podAnnotations "context" $) | nindent 12 }} + {{- end }} + spec: + {{- if .Values.defrag.cronjob.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.defrag.cronjob.activeDeadlineSeconds }} + {{- end }} + restartPolicy: {{ .Values.defrag.cronjob.restartPolicy }} + {{- if .Values.defrag.cronjob.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defrag.cronjob.podSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.defrag.cronjob.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.defrag.cronjob.nodeSelector "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.defrag.cronjob.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.defrag.cronjob.tolerations "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.defrag.cronjob.serviceAccountName }} + serviceAccountName: 
{{ .Values.defrag.cronjob.serviceAccountName | quote }} + {{- end }} + containers: + - name: etcd-defrag + image: {{ include "etcd.image" . }} + imagePullPolicy: "IfNotPresent" + {{- if .Values.defrag.cronjob.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defrag.cronjob.containerSecurityContext "context" $) | nindent 16 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if and .Values.auth.rbac.create .Values.auth.token.enabled }} + {{- if .Values.usePasswordFiles }} + - name: ETCD_ROOT_PASSWORD_FILE + value: {{ printf "/opt/drycc/etcd/secrets/%s" (include "etcd.secretPasswordKey" .) }} + {{- else }} + - name: ETCD_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + key: {{ include "etcd.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: ETCDCTL_CACERT + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + - name: ETCDCTL_KEY + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}" + - name: ETCDCTL_CERT + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certFilename }}" + {{- end }} + {{- if .Values.defrag.cronjob.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.defrag.cronjob.extraEnvVarsCM .Values.defrag.cronjob.extraEnvVarsSecret }} + envFrom: + {{- if .Values.defrag.cronjob.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.defrag.cronjob.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.defrag.cronjob.extraEnvVarsSecret }} + - secretRef: + name: {{ 
include "common.tplvalues.render" (dict "value" .Values.defrag.cronjob.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + {{- if .Values.defrag.cronjob.command }} + command: {{ .Values.defrag.cronjob.command | toYaml | nindent 16 }} + {{- else }} + command: + - /bin/bash + - -c + - |- + #!/usr/bin/env bash + set -eo pipefail + + # Include library + . /opt/drycc/scripts/libetcd.sh + + # Load etcd environment settings + . /opt/drycc/scripts/etcd-env.sh + + # Common flags + read -r -a flags <<<"$(etcdctl_auth_flags)" + cmd="etcdctl --command-timeout=60s ${flags[@]}" + {{- $etcdFullname := include "common.names.fullname" . }} + {{- $etcdHeadlessServiceName := (printf "%s-%s" $etcdFullname "headless" | trunc 63 | trimSuffix "-") }} + {{- $namespace := include "common.names.namespace" . }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $port := int .Values.service.ports.client }} + {{- $endpointStr := "" }} + {{- $replicaCount := (int .Values.replicaCount) }} + {{- $releaseNamespace := include "common.names.namespace" . 
}} + {{- range $iEndpoint, $e := until $replicaCount }} + {{- $endpoint := printf "%s://%s-%d.%s.%s.svc.%s:%d" (include "etcd.clientProtocol" $) $etcdFullname $iEndpoint $etcdHeadlessServiceName $namespace $clusterDomain $port }} + {{- $endpointStr = printf "%s%s" $endpointStr $endpoint }} + {{- if ne $iEndpoint (sub $replicaCount 1) }} + {{- $endpointStr = printf "%s," $endpointStr }} + {{- end }} + {{- end }} + {{- $endpointStr = printf "%s" $endpointStr }} + $cmd --endpoints={{ $endpointStr | quote}} defrag + {{- end }} + {{- if .Values.defrag.cronjob.args }} + args: {{ .Values.defrag.cronjob.args | toYaml | nindent 16 }} + {{- end }} + {{- if .Values.defrag.cronjob.resources }} + resources: {{- toYaml .Values.defrag.cronjob.resources | nindent 16 }} + {{- else if ne .Values.defrag.cronjob.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defrag.cronjob.resourcesPreset) | nindent 16 }} + {{- end }} + volumeMounts: + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + mountPath: /opt/drycc/etcd/certs/client/ + readOnly: true + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + mountPath: /opt/drycc/etcd/secrets/ + readOnly: true + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) (and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled)) }} + volumes: + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + secret: + secretName: {{ required "A secret containing the client certificates is required" (tpl .Values.auth.client.existingSecret .) 
}} + defaultMode: 256 + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + projected: + sources: + - secret: + name: {{ include "etcd.secretName" . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-snapshotter.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-snapshotter.yaml new file mode 100644 index 00000000..2d8b3f09 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/cronjob-snapshotter.yaml @@ -0,0 +1,171 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.disasterRecovery.enabled -}} +{{- $mergedLabels := mergeOverwrite (dict) .Values.commonLabels .Values.disasterRecovery.cronjob.podLabels -}} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ printf "%s-snapshotter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + concurrencyPolicy: Forbid + schedule: {{ .Values.disasterRecovery.cronjob.schedule | quote }} + successfulJobsHistoryLimit: {{ .Values.disasterRecovery.cronjob.historyLimit }} + jobTemplate: + spec: + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $mergedLabels "context" $ ) | nindent 12 }} + app.kubernetes.io/component: snapshotter + {{- if .Values.disasterRecovery.cronjob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.disasterRecovery.cronjob.podAnnotations "context" $) | nindent 12 }} + {{- end }} + spec: + {{- include "etcd.imagePullSecrets" . | nindent 10 }} + {{- if .Values.disasterRecovery.cronjob.nodeSelector }} + nodeSelector: {{- toYaml .Values.disasterRecovery.cronjob.nodeSelector | nindent 12 }} + {{- end }} + {{- if .Values.disasterRecovery.cronjob.tolerations }} + tolerations: {{- toYaml .Values.disasterRecovery.cronjob.tolerations | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + {{- if .Values.disasterRecovery.cronjob.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.disasterRecovery.cronjob.podSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.disasterRecovery.cronjob.serviceAccountName }} + serviceAccountName: {{ .Values.disasterRecovery.cronjob.serviceAccountName | quote }} + {{- end }} + {{- if and .Values.volumePermissions.enabled (or .Values.podSecurityContext.enabled .Values.containerSecurityContext.enabled) }} + initContainers: + - name: volume-permissions + image: {{ include "etcd.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /snapshots + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.volumePermissions.resources "context" $) | nindent 16 }} + {{- end }} + volumeMounts: + - name: snapshot-volume + mountPath: /snapshots + - name: empty-dir + mountPath: /tmp + {{- end }} + containers: + - name: etcd-snapshotter + image: {{ include "etcd.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.disasterRecovery.cronjob.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.disasterRecovery.cronjob.containerSecurityContext "context" $) | nindent 16 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 16 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 16 }} + {{- else if .Values.disasterRecovery.cronjob.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.disasterRecovery.cronjob.command "context" $) | nindent 16 }} + {{- else }} + command: + - /opt/drycc/scripts/etcd/snapshot.sh + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ETCD_ON_K8S + value: "yes" + - name: MY_STS_NAME + value: {{ include "common.names.fullname" . | quote }} + {{- $releaseNamespace := include "common.names.namespace" . }} + {{- $etcdFullname := include "common.names.fullname" . 
}} + {{- $etcdHeadlessServiceName := (printf "%s-%s" $etcdFullname "headless" | trunc 63 | trimSuffix "-") }} + {{- $clusterDomain := .Values.clusterDomain }} + - name: ETCD_CLUSTER_DOMAIN + value: {{ printf "%s.%s.svc.%s" $etcdHeadlessServiceName $releaseNamespace $clusterDomain | quote }} + - name: ETCD_SNAPSHOT_HISTORY_LIMIT + value: {{ .Values.disasterRecovery.cronjob.snapshotHistoryLimit | quote }} + - name: ETCD_SNAPSHOTS_DIR + value: {{ .Values.disasterRecovery.cronjob.snapshotsDir | quote }} + {{- if .Values.auth.client.secureTransport }} + - name: ETCD_CERT_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certFilename }}" + - name: ETCD_KEY_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}" + {{- if .Values.auth.client.enableAuthentication }} + - name: ETCD_CLIENT_CERT_AUTH + value: "true" + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- else if .Values.auth.client.caFilename }} + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename }}" + {{- else }} + - name: ETCD_EXTRA_AUTH_FLAGS + value: "--insecure-skip-tls-verify" + {{- end }} + {{- end }} + {{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} + {{- if .Values.usePasswordFiles }} + - name: ETCD_ROOT_PASSWORD_FILE + value: {{ printf "/opt/drycc/etcd/secrets/%s" (include "etcd.secretPasswordKey" .) }} + {{- else }} + - name: ETCD_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + key: {{ include "etcd.secretPasswordKey" . 
}} + {{- end }} + {{- end }} + {{- if .Values.disasterRecovery.cronjob.resources }} + resources: {{- toYaml .Values.disasterRecovery.cronjob.resources | nindent 16 }} + {{- else if ne .Values.disasterRecovery.cronjob.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.disasterRecovery.cronjob.resourcesPreset) | nindent 16 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: snapshot-volume + mountPath: /snapshots + {{- if .Values.disasterRecovery.pvc.subPath }} + subPath: {{ .Values.disasterRecovery.pvc.subPath }} + {{- end }} + {{- if .Values.auth.client.secureTransport }} + - name: certs + mountPath: /opt/drycc/etcd/certs/client + readOnly: true + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + mountPath: /opt/drycc/etcd/secrets + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if .Values.auth.client.secureTransport }} + - name: certs + secret: + secretName: {{ required "A secret containing the client certificates is required" (tpl .Values.auth.client.existingSecret .) }} + defaultMode: 256 + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + projected: + sources: + - secret: + name: {{ include "etcd.secretName" . }} + {{- end }} + - name: snapshot-volume + persistentVolumeClaim: + claimName: {{ include "etcd.disasterRecovery.pvc.name" . }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/extra-list.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/extra-list.yaml new file mode 100644 index 00000000..329f5c65 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/networkpolicy.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/networkpolicy.yaml new file mode 100644 index 00000000..16de7639 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/networkpolicy.yaml @@ -0,0 +1,100 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: etcd + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.containerPorts.client }} + - port: {{ .Values.containerPorts.peer }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/component: etcd + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + {{- if eq .Values.service.type "LoadBalancer" }} + - {} + {{- else }} + # Allow inbound connections + - ports: + - port: {{ .Values.containerPorts.client }} + - port: {{ .Values.containerPorts.peer }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + app.kubernetes.io/component: etcd + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: {{ .Values.metrics.useSeparateEndpoint | ternary .Values.containerPorts.metrics .Values.containerPorts.client }} + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/pdb.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/pdb.yaml new file mode 100644 index 00000000..9380fd20 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . 
}} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if or .Values.pdb.maxUnavailable ( not .Values.pdb.minAvailable ) }} + maxUnavailable: {{ .Values.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: etcd +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/podmonitor.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/podmonitor.yaml new file mode 100644 index 00000000..df5c147d --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/podmonitor.yaml @@ -0,0 +1,46 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.podMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ ternary .Values.metrics.podMonitor.namespace (include "common.names.namespace" .) 
(not (empty .Values.metrics.podMonitor.namespace)) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: {{ .Values.metrics.useSeparateEndpoint | ternary "metrics" "client" }} + path: /metrics + {{- if .Values.metrics.podMonitor.interval }} + interval: {{ .Values.metrics.podMonitor.interval }} + {{- end }} + {{- if .Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.podMonitor.scheme }} + scheme: {{ .Values.metrics.podMonitor.scheme }} + {{- end }} + {{- if .Values.metrics.podMonitor.tlsConfig }} + tlsConfig: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podMonitor.tlsConfig "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.podMonitor.relabelings }} + relabelings: + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . 
) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: etcd +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/preupgrade-hook-job.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/preupgrade-hook-job.yaml new file mode 100644 index 00000000..3d45f590 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/preupgrade-hook-job.yaml @@ -0,0 +1,208 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.preUpgradeJob.enabled }} +apiVersion: {{ include "common.capabilities.job.apiVersion" . }} +kind: Job +metadata: + name: {{ include "common.names.fullname" . }}-pre-upgrade + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd-pre-upgrade-job + {{- $defaultAnnotations := dict "helm.sh/hook" "pre-upgrade" "helm.sh/hook-delete-policy" "before-hook-creation" }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.preUpgradeJob.annotations .Values.commonAnnotations $defaultAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.preUpgradeJob.podLabels .Values.commonLabels ) "context" . ) }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: etcd-pre-upgrade-job + annotations: + {{- if .Values.preUpgradeJob.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.preUpgradeJob.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "etcd.createConfigmap" .) 
}} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "etcd.token.createSecret" .) }} + checksum/token-secret: {{ include (print $.Template.BasePath "/token-secrets.yaml") . | sha256sum }} + {{- end }} + spec: + {{- include "etcd.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.preUpgradeJob.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.preUpgradeJob.podAffinityPreset "component" "etcd-pre-upgrade-job" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.preUpgradeJob.podAntiAffinityPreset "component" "etcd-pre-upgrade-job" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.preUpgradeJob.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.preUpgradeJob.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.preUpgradeJob.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ 
.Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + restartPolicy: Never + containers: + {{- $replicaCount := int .Values.replicaCount }} + {{- $clientPort := int .Values.containerPorts.client }} + {{- $etcdFullname := include "common.names.fullname" . }} + {{- $releaseNamespace := include "common.names.namespace" . }} + {{- $etcdHeadlessServiceName := (printf "%s-%s" $etcdFullname "headless" | trunc 63 | trimSuffix "-") }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $etcdClientProtocol := include "etcd.clientProtocol" . }} + - name: pre-upgrade-job + image: {{ include "etcd.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.preUpgradeJob.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.preUpgradeJob.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + command: [ "/opt/drycc/scripts/etcd/entrypoint.sh" ] + args: [ "/opt/drycc/scripts/etcd/preupgrade.sh" ] + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ETCD_ON_K8S + value: "yes" + - name: ETCD_DATA_DIR + value: "/drycc/etcd/data" + {{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} + {{- if .Values.usePasswordFiles }} + - name: ETCD_ROOT_PASSWORD_FILE + value: {{ printf "/opt/drycc/etcd/secrets/%s" (include "etcd.secretPasswordKey" .) }} + {{- else }} + - name: ETCD_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + key: {{ include "etcd.secretPasswordKey" . 
}} + {{- end }} + {{- end }} + {{- $initialCluster := list }} + {{- range $e, $i := until $replicaCount }} + {{- $initialCluster = append $initialCluster (printf "%s-%d=%s://%s-%d.%s.%s.svc.%s:%d" $etcdFullname $i $etcdClientProtocol $etcdFullname $i $etcdHeadlessServiceName $releaseNamespace $clusterDomain $clientPort) }} + {{- end }} + - name: ETCD_INITIAL_CLUSTER + value: {{ join "," $initialCluster | quote }} + {{- if and .Values.auth.client.secureTransport .Values.auth.client.useAutoTLS }} + - name: ETCD_AUTO_TLS + value: "true" + {{- else if .Values.auth.client.secureTransport }} + - name: ETCD_CERT_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certFilename }}" + - name: ETCD_KEY_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}" + {{- if .Values.auth.client.enableAuthentication }} + - name: ETCD_CLIENT_CERT_AUTH + value: "true" + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- else if .Values.auth.client.caFilename }} + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename }}" + {{- else }} + - name: ETCD_EXTRA_AUTH_FLAGS + value: "--insecure-skip-tls-verify" + {{- end }} + {{- end }} + {{- if .Values.preUpgradeJob.startDelay }} + - name: ETCD_PREUPGRADE_START_DELAY + value: {{ .Values.preUpgradeJob.startDelay | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.preUpgradeJob.resources }} + resources: 
{{- include "common.tplvalues.render" (dict "value" .Values.preUpgradeJob.resources "context" $) | nindent 12 }} + {{- else if ne .Values.preUpgradeJob.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.preUpgradeJob.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: configuration + mountPath: /opt/drycc/etcd/conf/ + {{- else }} + - name: empty-dir + mountPath: /opt/drycc/etcd/conf/ + subPath: app-conf-dir + {{- end }} + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + {{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") }} + - name: etcd-jwt-token + mountPath: /opt/drycc/etcd/certs/token/ + readOnly: true + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + mountPath: /opt/drycc/etcd/certs/client/ + readOnly: true + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + mountPath: /opt/drycc/etcd/secrets + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: configuration + configMap: + name: {{ include "etcd.configmapName" . }} + {{- end }} + {{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") }} + - name: etcd-jwt-token + secret: + secretName: {{ include "etcd.token.secretName" . 
}} + defaultMode: 256 + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + secret: + secretName: {{ required "A secret containing the client certificates is required" (tpl .Values.auth.client.existingSecret .) }} + defaultMode: 256 + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + projected: + sources: + - secret: + name: {{ include "etcd.secretName" . }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/prometheusrule.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/prometheusrule.yaml new file mode 100644 index 00000000..9f2631dc --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) 
.Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 6 }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/secrets.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/secrets.yaml new file mode 100644 index 00000000..091b40ec --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/secrets.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (or .Values.auth.rbac.create .Values.auth.rbac.enabled) (not .Values.auth.rbac.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "etcd.secretName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{ include "etcd.secretPasswordKey" .}}: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" (include "etcd.secretPasswordKey" .) 
"providedValues" (list "auth.rbac.rootPassword") "context" $) }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/serviceaccount.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/serviceaccount.yaml new file mode 100644 index 00000000..48ee3d2a --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ include "etcd.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/snapshot-pvc.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/snapshot-pvc.yaml new file mode 100644 index 00000000..8477d189 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/snapshot-pvc.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.disasterRecovery.enabled (not .Values.disasterRecovery.pvc.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ printf "%s-snapshotter" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + annotations: + helm.sh/resource-policy: keep + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.disasterRecovery.pvc.size | quote }} + storageClassName: {{ .Values.disasterRecovery.pvc.storageClassName | quote }} +{{- end -}} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/statefulset.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/statefulset.yaml new file mode 100644 index 00000000..f998ea09 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/statefulset.yaml @@ -0,0 +1,470 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: etcd + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: etcd + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "etcd.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "etcd.token.createSecret" .) }} + checksum/token-secret: {{ include (print $.Template.BasePath "/token-secrets.yaml") . | sha256sum }} + {{- end }} + spec: + {{- include "etcd.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "etcd" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "etcd" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.runtimeClassName }} + runtimeClassName: {{ .Values.runtimeClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.shareProcessNamespace }} + {{- end }} + serviceAccountName: {{ include "etcd.serviceAccountName" $ | quote }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "etcd.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /drycc/etcd + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.volumePermissions.resources "context" $) | nindent 12 }} + {{- else if ne .Values.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.volumePermissions.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /drycc/etcd + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + {{- end }} + {{- end }} + containers: + {{- $replicaCount := int .Values.replicaCount }} + {{- $peerPort := int .Values.containerPorts.peer }} + {{- $etcdFullname := include "common.names.fullname" . }} + {{- $releaseNamespace := include "common.names.namespace" . }} + {{- $etcdHeadlessServiceName := (printf "%s-%s" $etcdFullname "headless" | trunc 63 | trimSuffix "-") }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $etcdPeerProtocol := include "etcd.peerProtocol" . }} + {{- $etcdClientProtocol := include "etcd.clientProtocol" . }} + - name: etcd + image: {{ include "etcd.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_STS_NAME + value: {{ include "common.names.fullname" . 
| quote }} + - name: ETCD_ON_K8S + value: "yes" + - name: ETCD_START_FROM_SNAPSHOT + value: {{ ternary "yes" "no" .Values.startFromSnapshot.enabled | quote }} + - name: ETCD_DISASTER_RECOVERY + value: {{ ternary "yes" "no" .Values.disasterRecovery.enabled | quote }} + - name: ETCD_NAME + value: "$(MY_POD_NAME)" + - name: ETCD_DATA_DIR + value: "/drycc/etcd/data" + - name: ETCD_LOG_LEVEL + value: {{ ternary "debug" .Values.logLevel .Values.image.debug | quote }} + - name: ALLOW_NONE_AUTHENTICATION + value: {{ ternary "yes" "no" (and (not (or .Values.auth.rbac.create .Values.auth.rbac.enabled)) .Values.auth.rbac.allowNoneAuthentication) | quote }} + {{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} + {{- if .Values.usePasswordFiles }} + - name: ETCD_ROOT_PASSWORD_FILE + value: {{ printf "/opt/drycc/etcd/secrets/%s" (include "etcd.secretPasswordKey" .) }} + {{- else }} + - name: ETCD_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + key: {{ include "etcd.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.auth.token.enabled }} + - name: ETCD_AUTH_TOKEN + {{- if eq .Values.auth.token.type "jwt" }} + value: {{ printf "jwt,priv-key=/opt/drycc/etcd/certs/token/%s,sign-method=%s,ttl=%s" .Values.auth.token.privateKey.filename .Values.auth.token.signMethod .Values.auth.token.ttl | quote }} + {{- else if eq .Values.auth.token.type "simple" }} + value: "simple" + {{- end }} + {{- end }} + - name: ETCD_ADVERTISE_CLIENT_URLS + value: "{{ $etcdClientProtocol }}://$(MY_POD_NAME).{{ $etcdHeadlessServiceName }}.{{ include "common.names.namespace" . }}.svc.{{ $clusterDomain }}:{{ .Values.containerPorts.client }}{{- if .Values.service.enabled }},{{ $etcdClientProtocol }}://{{ $etcdFullname }}.{{ include "common.names.namespace" . 
}}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }}{{- end }}" + - name: ETCD_LISTEN_CLIENT_URLS + value: "{{ $etcdClientProtocol }}://0.0.0.0:{{ .Values.containerPorts.client }}" + - name: ETCD_INITIAL_ADVERTISE_PEER_URLS + value: "{{ $etcdPeerProtocol }}://$(MY_POD_NAME).{{ $etcdHeadlessServiceName }}.{{ include "common.names.namespace" . }}.svc.{{ $clusterDomain }}:{{ .Values.containerPorts.peer }}" + - name: ETCD_LISTEN_PEER_URLS + value: "{{ $etcdPeerProtocol }}://0.0.0.0:{{ .Values.containerPorts.peer }}" + {{- if .Values.metrics.useSeparateEndpoint }} + - name: ETCD_LISTEN_METRICS_URLS + value: "http://0.0.0.0:{{ .Values.containerPorts.metrics }}" + {{- end }} + {{- if .Values.autoCompactionMode }} + - name: ETCD_AUTO_COMPACTION_MODE + value: {{ .Values.autoCompactionMode | quote }} + {{- end }} + {{- if .Values.autoCompactionRetention }} + - name: ETCD_AUTO_COMPACTION_RETENTION + value: {{ .Values.autoCompactionRetention | quote }} + {{- end }} + {{- if .Values.maxProcs }} + - name: GOMAXPROCS + value: {{ .Values.maxProcs | quote }} + {{- end }} + - name: ETCD_INITIAL_CLUSTER_TOKEN + value: {{ .Values.initialClusterToken | quote }} + {{- $initialCluster := list }} + {{- range $e, $i := until $replicaCount }} + {{- $initialCluster = append $initialCluster (printf "%s-%d=%s://%s-%d.%s.%s.svc.%s:%d" $etcdFullname $i $etcdPeerProtocol $etcdFullname $i $etcdHeadlessServiceName $releaseNamespace $clusterDomain $peerPort) }} + {{- end }} + - name: ETCD_INITIAL_CLUSTER + value: {{ join "," $initialCluster | quote }} + - name: ETCD_CLUSTER_DOMAIN + value: {{ printf "%s.%s.svc.%s" $etcdHeadlessServiceName $releaseNamespace $clusterDomain | quote }} + {{- if and .Values.auth.client.secureTransport .Values.auth.client.useAutoTLS }} + - name: ETCD_AUTO_TLS + value: "true" + {{- else if .Values.auth.client.secureTransport }} + - name: ETCD_CERT_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certFilename }}" + - name: ETCD_KEY_FILE + value: 
"/opt/drycc/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}" + {{- if .Values.auth.client.enableAuthentication }} + - name: ETCD_CLIENT_CERT_AUTH + value: "true" + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- else if .Values.auth.client.caFilename }} + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- end }} + {{- end }} + {{- if and .Values.auth.peer.secureTransport .Values.auth.peer.useAutoTLS }} + - name: ETCD_PEER_AUTO_TLS + value: "true" + {{- else if .Values.auth.peer.secureTransport }} + - name: ETCD_PEER_CERT_FILE + value: "/opt/drycc/etcd/certs/peer/{{ .Values.auth.peer.certFilename }}" + - name: ETCD_PEER_KEY_FILE + value: "/opt/drycc/etcd/certs/peer/{{ .Values.auth.peer.certKeyFilename }}" + {{- if .Values.auth.peer.enableAuthentication }} + - name: ETCD_PEER_CLIENT_CERT_AUTH + value: "true" + - name: ETCD_PEER_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/peer/{{ .Values.auth.peer.caFilename | default "ca.crt" }}" + {{- else if .Values.auth.peer.caFilename }} + - name: ETCD_PEER_TRUSTED_CA_FILE + value: "/opt/drycc/etcd/certs/peer/{{ .Values.auth.peer.caFilename | default "ca.crt" }}" + {{- end }} + {{- end }} + {{- if .Values.startFromSnapshot.enabled }} + - name: ETCD_INIT_SNAPSHOT_FILENAME + value: {{ .Values.startFromSnapshot.snapshotFilename | quote }} + - name: ETCD_INIT_SNAPSHOTS_DIR + value: {{ ternary "/snapshots" "/init-snapshot" (and .Values.disasterRecovery.enabled (not .Values.disasterRecovery.pvc.existingClaim)) | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- 
if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.containerPorts.client }} + protocol: TCP + - name: peer + containerPort: {{ .Values.containerPorts.peer }} + protocol: TCP + {{- if .Values.metrics.useSeparateEndpoint }} + - name: metrics + containerPort: {{ .Values.containerPorts.metrics }} + protocol: TCP + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: + {{- if .Values.auth.client.secureTransport }} + exec: + command: + - /opt/drycc/scripts/etcd/healthcheck.sh + {{- else }} + httpGet: + port: {{ .Values.metrics.useSeparateEndpoint | ternary .Values.containerPorts.metrics .Values.containerPorts.client }} + path: /livez + scheme: "HTTP" + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /opt/drycc/scripts/etcd/healthcheck.sh + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ 
.Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: + exec: + command: + - /opt/drycc/scripts/etcd/healthcheck.sh + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.resources "context" $) | nindent 12 }} + {{- else if ne .Values.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: configuration + mountPath: /opt/drycc/etcd/conf/ + {{- else }} + - name: empty-dir + mountPath: /opt/drycc/etcd/conf/ + subPath: app-conf-dir + {{- end }} + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: data + mountPath: /drycc/etcd + {{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") }} + - name: etcd-jwt-token + mountPath: /opt/drycc/etcd/certs/token/ + readOnly: true + {{- end }} + {{- if or (and .Values.startFromSnapshot.enabled (not .Values.disasterRecovery.enabled)) (and .Values.disasterRecovery.enabled .Values.startFromSnapshot.enabled .Values.disasterRecovery.pvc.existingClaim) }} + - name: init-snapshot-volume + mountPath: /init-snapshot + {{- end }} + {{- if 
.Values.disasterRecovery.enabled }} + - name: snapshot-volume + mountPath: /snapshots + {{- if .Values.disasterRecovery.pvc.subPath }} + subPath: {{ .Values.disasterRecovery.pvc.subPath }} + {{- end }} + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + mountPath: /opt/drycc/etcd/certs/client/ + readOnly: true + {{- end }} + {{- if or .Values.auth.peer.enableAuthentication (and .Values.auth.peer.secureTransport (not .Values.auth.peer.useAutoTLS )) }} + - name: etcd-peer-certs + mountPath: /opt/drycc/etcd/certs/peer/ + readOnly: true + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + mountPath: /opt/drycc/etcd/secrets + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: {} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: configuration + configMap: + name: {{ include "etcd.configmapName" . }} + {{- end }} + {{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") }} + - name: etcd-jwt-token + secret: + secretName: {{ include "etcd.token.secretName" . 
}} + defaultMode: 256 + {{- end }} + {{- if or (and .Values.startFromSnapshot.enabled (not .Values.disasterRecovery.enabled)) (and .Values.disasterRecovery.enabled .Values.startFromSnapshot.enabled .Values.disasterRecovery.pvc.existingClaim) }} + - name: init-snapshot-volume + persistentVolumeClaim: + claimName: {{ .Values.startFromSnapshot.existingClaim }} + {{- end }} + {{- if or .Values.disasterRecovery.enabled (and .Values.disasterRecovery.enabled .Values.startFromSnapshot.enabled) }} + - name: snapshot-volume + persistentVolumeClaim: + claimName: {{ include "etcd.disasterRecovery.pvc.name" . }} + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + secret: + secretName: {{ required "A secret containing the client certificates is required" (tpl .Values.auth.client.existingSecret .) }} + defaultMode: 256 + {{- end }} + {{- if or .Values.auth.peer.enableAuthentication (and .Values.auth.peer.secureTransport (not .Values.auth.peer.useAutoTLS )) }} + - name: etcd-peer-certs + secret: + secretName: {{ required "A secret containing the peer certificates is required" (tpl .Values.auth.peer.existingSecret .) }} + defaultMode: 256 + {{- end }} + {{- if and .Values.usePasswordFiles (or .Values.auth.rbac.create .Values.auth.rbac.enabled) }} + - name: etcd-secrets + projected: + sources: + - secret: + name: {{ include "etcd.secretName" . 
}} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + {{- if .Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.extraVolumeClaimTemplates }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeClaimTemplates "context" $) | nindent 4 }} + {{- end }} + {{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/svc-headless.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/svc-headless.yaml new file mode 100644 index 00000000..4e47160a --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/svc-headless.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + {{- if or .Values.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + {{- if .Values.service.clientPortNameOverride }} + {{- if .Values.auth.client.secureTransport }} + - name: {{ .Values.service.clientPortNameOverride }}-ssl + {{- else }} + - name: {{ .Values.service.clientPortNameOverride }} + {{- end }} + {{- else }} + - name: client + {{- end }} + port: {{ .Values.containerPorts.client }} + targetPort: client + {{- if .Values.service.peerPortNameOverride }} + {{- if .Values.auth.peer.secureTransport }} + - name: {{ .Values.service.peerPortNameOverride }}-ssl + {{- else }} + - name: {{ .Values.service.peerPortNameOverride }} + {{- end }} + {{- else }} + - name: peer + {{- end }} + port: {{ .Values.containerPorts.peer }} + targetPort: peer + {{- if .Values.metrics.useSeparateEndpoint }} + {{- if .Values.service.metricsPortNameOverride }} + - name: {{ .Values.service.metricsPortNameOverride }} + {{- else }} + - name: metrics + {{- end }} + port: {{ .Values.containerPorts.metrics }} + targetPort: metrics + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) 
"context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/svc.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/svc.yaml new file mode 100644 index 00000000..1d534661 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/svc.yaml @@ -0,0 +1,77 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerClass)) }} + loadBalancerClass: {{ .Values.service.loadBalancerClass }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: {{ default "client" .Values.service.clientPortNameOverride | quote }} + port: {{ .Values.service.ports.client }} + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty (.Values.service.nodePorts.client))) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: {{ default "peer" .Values.service.peerPortNameOverride | quote }} + port: {{ 
.Values.service.ports.peer }} + targetPort: peer + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty (.Values.service.nodePorts.peer))) }} + nodePort: {{ .Values.service.nodePorts.peer }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.metrics.useSeparateEndpoint }} + - name: {{ default "metrics" .Values.service.metricsPortNameOverride | quote }} + port: {{ .Values.service.ports.metrics }} + targetPort: metrics + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty (.Values.service.nodePorts.metrics))) }} + nodePort: {{ .Values.service.nodePorts.metrics }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: etcd +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/templates/token-secrets.yaml b/addons/etcd/3.6/chart/etcd-3.6/templates/token-secrets.yaml new file mode 100644 index 00000000..cd56f47c --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/templates/token-secrets.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "etcd.token.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "etcd.token.secretName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + jwt-token.pem: {{ include "etcd.token.jwtToken" . | b64enc | quote }} +{{- end }} diff --git a/addons/etcd/3.6/chart/etcd-3.6/values.yaml b/addons/etcd/3.6/chart/etcd-3.6/values.yaml new file mode 100644 index 00000000..f7662506 --- /dev/null +++ b/addons/etcd/3.6/chart/etcd-3.6/values.yaml @@ -0,0 +1,1275 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets [array] Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section Common parameters +## + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace template +## +namespaceOverride: "" +## @param commonLabels [object] Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations [object] Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy [array] Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param usePasswordFiles Mount credentials as files instead of using environment variables +## +usePasswordFiles: true +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity +## @section etcd parameters +## + +## Bitnami etcd image version +## ref: https://hub.docker.com/r/bitnami/etcd/tags/ +## @param image.registry [default: REGISTRY_NAME] etcd image registry +## @param image.repository [default: REPOSITORY_NAME/etcd] etcd image name +## @skip image.tag etcd image tag +## @param image.digest etcd image digest in the 
way sha256:aa.... Please note this parameter, if set, will override the tag +## +image: + registry: registry.drycc.cc + repository: drycc-addons/etcd + tag: "3.6" + digest: "" + ## @param image.pullPolicy etcd image pull policy + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## @param image.pullSecrets [array] etcd image pull secrets + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param image.debug Enable image debug mode + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## +auth: + ## Role-based access control parameters + ## ref: https://etcd.io/docs/current/op-guide/authentication/ + ## + rbac: + ## @param auth.rbac.create Switch to enable RBAC authentication + ## + create: true + ## @param auth.rbac.allowNoneAuthentication Allow to use etcd without configuring RBAC authentication + ## + allowNoneAuthentication: true + ## @param auth.rbac.rootPassword Root user password. The root user is always `root` + ## + rootPassword: "" + ## @param auth.rbac.existingSecret Name of the existing secret containing credentials for the root user + ## + existingSecret: "" + ## @param auth.rbac.existingSecretPasswordKey Name of key containing password to be retrieved from the existing secret + ## + existingSecretPasswordKey: "" + ## Authentication token + ## ref: https://etcd.io/docs/latest/learning/design-auth-v3/#two-types-of-tokens-simple-and-jwt + ## + token: + ## @param auth.token.enabled Enables token authentication + ## + enabled: true + ## @param auth.token.type Authentication token type. 
Allowed values: 'simple' or 'jwt' + ## ref: https://etcd.io/docs/latest/op-guide/configuration/#--auth-token + ## + type: jwt + ## @param auth.token.privateKey.filename Name of the file containing the private key for signing the JWT token + ## @param auth.token.privateKey.existingSecret Name of the existing secret containing the private key for signing the JWT token + ## NOTE: Ignored if auth.token.type=simple + ## NOTE: A secret containing a private key will be auto-generated if an existing one is not provided. + ## + privateKey: + filename: jwt-token.pem + existingSecret: "" + ## @param auth.token.signMethod JWT token sign method + ## NOTE: Ignored if auth.token.type=simple + ## + signMethod: RS256 + ## @param auth.token.ttl JWT token TTL + ## NOTE: Ignored if auth.token.type=simple + ## + ttl: 10m + ## TLS authentication for client-to-server communications + ## ref: https://etcd.io/docs/current/op-guide/security/ + ## + client: + ## @param auth.client.secureTransport Switch to encrypt client-to-server communications using TLS certificates + ## + secureTransport: false + ## @param auth.client.useAutoTLS Switch to automatically create the TLS certificates + ## + useAutoTLS: false + ## @param auth.client.existingSecret Name of the existing secret containing the TLS certificates for client-to-server communications + ## + existingSecret: "" + ## @param auth.client.enableAuthentication Switch to enable host authentication using TLS certificates. 
Requires existing secret + ## + enableAuthentication: false + ## @param auth.client.certFilename Name of the file containing the client certificate + ## + certFilename: cert.pem + ## @param auth.client.certKeyFilename Name of the file containing the client certificate private key + ## + certKeyFilename: key.pem + ## @param auth.client.caFilename Name of the file containing the client CA certificate + ## If not specified and `auth.client.enableAuthentication=true` or `auth.rbac.enabled=true`, the default is `ca.crt` + ## + caFilename: "" + ## TLS authentication for server-to-server communications + ## ref: https://etcd.io/docs/current/op-guide/security/ + ## + peer: + ## @param auth.peer.secureTransport Switch to encrypt server-to-server communications using TLS certificates + ## + secureTransport: false + ## @param auth.peer.useAutoTLS Switch to automatically create the TLS certificates + ## + useAutoTLS: false + ## @param auth.peer.existingSecret Name of the existing secret containing the TLS certificates for server-to-server communications + ## + existingSecret: "" + ## @param auth.peer.enableAuthentication Switch to enable host authentication using TLS certificates. Requires existing secret + ## + enableAuthentication: false + ## @param auth.peer.certFilename Name of the file containing the peer certificate + ## + certFilename: cert.pem + ## @param auth.peer.certKeyFilename Name of the file containing the peer certificate private key + ## + certKeyFilename: key.pem + ## @param auth.peer.caFilename Name of the file containing the peer CA certificate + ## If not specified and `auth.peer.enableAuthentication=true` or `rbac.enabled=true`, the default is `ca.crt` + ## + caFilename: "" +## @param autoCompactionMode Auto compaction mode, by default periodic. Valid values: "periodic", "revision". +## - 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. 5m). +## - 'revision' for revision number based retention. 
+## +autoCompactionMode: "" +## @param autoCompactionRetention Auto compaction retention for mvcc key value store in hour, by default 0, means disabled +## +autoCompactionRetention: "" +## @param initialClusterToken Initial cluster token. Can be used to protect etcd from cross-cluster-interaction, which might corrupt the clusters. +## If spinning up multiple clusters (or creating and destroying a single cluster) +## with same configuration for testing purpose, it is highly recommended that each cluster is given a unique initial-cluster-token. +## By doing this, etcd can generate unique cluster IDs and member IDs for the clusters even if they otherwise have the exact same configuration. +## +initialClusterToken: "etcd-cluster-k8s" +## @param logLevel Sets the log level for the etcd process. Allowed values: 'debug', 'info', 'warn', 'error', 'panic', 'fatal' +## +logLevel: "info" +## @param maxProcs Limits the number of operating system threads that can execute user-level +## Go code simultaneously by setting GOMAXPROCS environment variable +## ref: https://golang.org/pkg/runtime +## +maxProcs: "" +## @param configuration etcd configuration. 
Specify content for etcd.conf.yml +## e.g: +## configuration: |- +## foo: bar +## baz: +## +configuration: "" +## @param existingConfigmap Existing ConfigMap with etcd configuration +## NOTE: When it's set the configuration parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars [array] Extra environment variables to be set on etcd container +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars +## +extraEnvVarsSecret: "" +## @param command [array] Default container command (useful when using custom images) +## +command: [] +## @param args [array] Default container args (useful when using custom images) +## +args: [] +## @section etcd statefulset parameters +## + +## @param replicaCount Number of etcd replicas to deploy +## +replicaCount: 3 +## Update strategy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## @param updateStrategy.type Update strategy type, can be set to RollingUpdate or OnDelete. 
+## +updateStrategy: + type: RollingUpdate +## @param podManagementPolicy Pod management policy for the etcd statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies +## +podManagementPolicy: Parallel +## @param automountServiceAccountToken Mount Service Account token in pod +## +automountServiceAccountToken: false +## @param hostAliases [array] etcd pod host aliases +## ref: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param lifecycleHooks [object] Override default etcd container hooks +## +lifecycleHooks: {} +## etcd container ports to open +## @param containerPorts.client Client port to expose at container level +## @param containerPorts.peer Peer port to expose at container level +## @param containerPorts.metrics Metrics port to expose at container level when metrics.useSeparateEndpoint is true +## +containerPorts: + client: 2379 + peer: 2380 + metrics: 9090 +## etcd pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled etcd pods' Security Context +## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy +## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface +## @param podSecurityContext.supplementalGroups Set filesystem extra groups +## @param podSecurityContext.fsGroup Set etcd pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 +## etcd containers' SecurityContext +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enabled etcd containers' Security Context +## @param containerSecurityContext.seLinuxOptions 
[object,nullable] Set SELinux options in container +## @param containerSecurityContext.runAsUser Set etcd containers' Security Context runAsUser +## @param containerSecurityContext.runAsGroup Set etcd containers' Security Context runAsGroup +## @param containerSecurityContext.runAsNonRoot Set Controller container's Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set primary container's Security Context privileged +## @param containerSecurityContext.allowPrivilegeEscalation Set primary container's Security Context allowPrivilegeEscalation +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile +## +containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" +## etcd containers' resource requests and limits +## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
+## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 +## +resourcesPreset: "micro" +## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) +## Example: +## resources: +## requests: +## cpu: 2 +## memory: 512Mi +## limits: +## cpu: 3 +## memory: 1024Mi +## +resources: {} +## Configure extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for startup probe +## 
ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 60 +## @param customLivenessProbe [object] Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe [object] Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe [object] Override default startup probe +## +customStartupProbe: {} +## @param extraVolumes [array] Optionally specify extra list of additional volumes for etcd pods +## +extraVolumes: [] +## @param extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for etcd container(s) +## +extraVolumeMounts: [] +## @param extraVolumeClaimTemplates [array] Optionally specify extra list of additional volumeClaimTemplates for etcd container(s) +## +extraVolumeClaimTemplates: [] +## @param initContainers [array] Add additional init containers to the etcd pods +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars [array] Add additional sidecar containers to the etcd pods +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param podAnnotations [object] 
Annotations for etcd pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podLabels [object] Extra labels for etcd pods +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. +## @param nodeAffinityPreset.values [array] Node label values to match. Ignored if `affinity` is set. 
+## +nodeAffinityPreset: + type: "" + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity [object] Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector [object] Node labels for pod assignment +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +## +nodeSelector: {} +## @param tolerations [array] Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate +## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution +## +terminationGracePeriodSeconds: "" +## @param schedulerName Name of the k8s scheduler (other than default) +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param priorityClassName Name of the priority class to be used by etcd pods +## Priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param runtimeClassName Name of the runtime class to be used by pod(s) +## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ +## +runtimeClassName: "" +## @param shareProcessNamespace Enable shared process namespace in a pod. +## If set to false (default), each container will run in separate namespace, etcd will have PID=1. +## If set to true, the /pause will run as init process and will reap any zombie PIDs, +## for example, generated by a custom exec probe running longer than a probe timeoutSeconds. 
+## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ +## +shareProcessNamespace: false +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment +## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## The value is evaluated as a template +## +topologySpreadConstraints: [] +## persistentVolumeClaimRetentionPolicy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention +## @param persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet +## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced +## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted +persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete +## @section Traffic exposure parameters +## + +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.enabled create second service if equal true + ## + enabled: true + ## @param service.clusterIP Kubernetes service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.ports.client etcd client port + ## @param service.ports.peer etcd peer port + ## @param service.ports.metrics etcd metrics port when metrics.useSeparateEndpoint is true + ## + ports: + client: 2379 + peer: 2380 + metrics: 9090 + ## @param service.nodePorts.client Specify the nodePort client value for the LoadBalancer and NodePort service types. + ## @param service.nodePorts.peer Specify the nodePort peer value for the LoadBalancer and NodePort service types. 
+ ## @param service.nodePorts.metrics Specify the nodePort metrics value for the LoadBalancer and NodePort service types. The metrics port is only exposed when metrics.useSeparateEndpoint is true. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + peer: "" + metrics: "" + ## @param service.clientPortNameOverride etcd client port name override + ## + clientPortNameOverride: "" + ## @param service.peerPortNameOverride etcd peer port name override + ## + peerPortNameOverride: "" + ## @param service.metricsPortNameOverride etcd metrics port name override. The metrics port is only exposed when metrics.useSeparateEndpoint is true. + ## + metricsPortNameOverride: "" + ## @param service.loadBalancerIP loadBalancerIP for the etcd service (optional, cloud specific) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerClass loadBalancerClass for the etcd service (optional, cloud specific) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param service.loadBalancerSourceRanges [array] Load Balancer source ranges + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalIPs [array] External IPs + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param service.externalTrafficPolicy %%MAIN_CONTAINER_NAME%% service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.extraPorts Extra ports to expose 
(normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.annotations [object] Additional annotations for the etcd service + ## + annotations: {} + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} +## @section Persistence parameters +## + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ +## +persistence: + ## @param persistence.enabled If true, use a Persistent Volume Claim. If false, use emptyDir. + ## + enabled: true + ## @param persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## + ## @param persistence.annotations [object] Annotations for the PVC + ## + annotations: {} + ## @param persistence.labels [object] Labels for the PVC + ## + labels: {} + ## @param persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for etcd data volume + ## + size: 8Gi + ## @param persistence.selector [object] Selector to match an existing Persistent Volume + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + selector: {} +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name + ## @skip volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## + image: + registry: registry.drycc.cc + repository: drycc/base + tag: "trixie" + digest: "" + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## + pullPolicy: IfNotPresent + ## @param volumePermissions.image.pullSecrets [array] Specify docker-registry secret names as an array + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container' resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} +## @section Network Policy parameters +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## etcd is listening on. 
When true, etcd will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + allowCurrentNamespace: true + allowNamespaces: [] + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## @section Metrics parameters +## +metrics: + ## @param metrics.enabled Expose etcd metrics + ## + enabled: false + ## @param metrics.useSeparateEndpoint Use a separate endpoint for exposing metrics + # + useSeparateEndpoint: false + ## @param metrics.podAnnotations [object] Annotations for the Prometheus metrics on etcd pods + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.useSeparateEndpoint | ternary .Values.containerPorts.metrics .Values.containerPorts.client }}" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + podMonitor: + ## 
@param metrics.podMonitor.enabled Create PodMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.podMonitor.namespace Namespace in which Prometheus is running + ## + namespace: monitoring + ## @param metrics.podMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.podMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## + scrapeTimeout: 30s + ## @param metrics.podMonitor.additionalLabels [object] Additional labels that can be used so PodMonitors will be discovered by Prometheus + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.podMonitor.scheme Scheme to use for scraping + ## + scheme: http + ## @param metrics.podMonitor.tlsConfig [object] TLS configuration used for scrape endpoints used by Prometheus + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + ## e.g: + ## tlsConfig: + ## ca: + ## secret: + ## name: existingSecretName + ## + tlsConfig: {} + ## @param metrics.podMonitor.relabelings [array] Prometheus relabeling rules + ## + relabelings: [] + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Prometheus Rule definitions + # - alert: ETCD has no leader + # annotations: + # summary: "ETCD has no leader" + # 
description: "pod {{`{{`}} $labels.pod {{`}}`}} state error, can't connect leader" + # for: 1m + # expr: etcd_server_has_leader == 0 + # labels: + # severity: critical + # group: PaaS + ## + rules: [] +## @section Snapshotting parameters +## + +## Start a new etcd cluster recovering the data from an existing snapshot before bootstrapping +## +startFromSnapshot: + ## @param startFromSnapshot.enabled Initialize new cluster recovering an existing snapshot + ## + enabled: false + ## @param startFromSnapshot.existingClaim Existing PVC containing the etcd snapshot + ## + existingClaim: "" + ## @param startFromSnapshot.snapshotFilename Snapshot filename + ## + snapshotFilename: "" +## Enable auto disaster recovery by periodically snapshotting the keyspace: +## - It creates a cronjob to periodically snapshotting the keyspace +## - It also creates a ReadWriteMany PVC to store the snapshots +## If the cluster permanently loses more than (N-1)/2 members, it tries to +## recover itself from the last available snapshot. 
+## +disasterRecovery: + ## @param disasterRecovery.enabled Enable auto disaster recovery by periodically snapshotting the keyspace + ## + enabled: false + cronjob: + ## @param disasterRecovery.cronjob.schedule Schedule in Cron format to save snapshots + ## See https://en.wikipedia.org/wiki/Cron + ## + schedule: "*/30 * * * *" + ## @param disasterRecovery.cronjob.historyLimit Number of successful finished jobs to retain + ## + historyLimit: 1 + ## @param disasterRecovery.cronjob.snapshotHistoryLimit Number of etcd snapshots to retain, tagged by date + ## + snapshotHistoryLimit: 1 + ## @param disasterRecovery.cronjob.snapshotsDir Directory to store snapshots + ## + snapshotsDir: "/snapshots" + ## @param disasterRecovery.cronjob.podAnnotations [object] Pod annotations for cronjob pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## K8s Security Context for Snapshotter cronjob pods + ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param disasterRecovery.cronjob.podSecurityContext.enabled Enable security context for Snapshotter pods + ## @param disasterRecovery.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param disasterRecovery.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param disasterRecovery.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param disasterRecovery.cronjob.podSecurityContext.fsGroup Group ID for the Snapshotter filesystem + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure container security context for Snapshotter cronjob containers + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param disasterRecovery.cronjob.containerSecurityContext.enabled Enabled containers' 
Security Context + ## @param disasterRecovery.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param disasterRecovery.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param disasterRecovery.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param disasterRecovery.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param disasterRecovery.cronjob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param disasterRecovery.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param disasterRecovery.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param disasterRecovery.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param disasterRecovery.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure resource requests and limits for snapshotter containers + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param disasterRecovery.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if disasterRecovery.cronjob.resources is set (disasterRecovery.cronjob.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param disasterRecovery.cronjob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param disasterRecovery.cronjob.nodeSelector Node labels for cronjob pods assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param disasterRecovery.cronjob.tolerations Tolerations for cronjob pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param disasterRecovery.cronjob.podLabels [object] Labels that will be added to pods created by cronjob + ## + podLabels: {} + ## @param disasterRecovery.cronjob.serviceAccountName Specifies the service account to use for disaster recovery cronjob + ## + serviceAccountName: "" + ## @param disasterRecovery.cronjob.command Override default snapshot container command (useful when you want to customize the snapshot logic) + ## + command: [] + ## + pvc: + ## @param disasterRecovery.pvc.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + existingClaim: "" + ## @param disasterRecovery.pvc.size PVC Storage Request + ## + size: 2Gi + ## @param 
disasterRecovery.pvc.storageClassName Storage Class for snapshots volume + ## + storageClassName: nfs + ## @param disasterRecovery.pvc.subPath Path within the volume from which to mount + ## Useful if snapshots should only be stored in a subdirectory of the volume + ## + subPath: "" +## @section Service account parameters +## +serviceAccount: + ## @param serviceAccount.create Enable/disable service account creation + ## + create: true + ## @param serviceAccount.name Name of the service account to create or use + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Enable/disable auto mounting of service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations [object] Additional annotations to be included on the service account + ## + annotations: {} + ## @param serviceAccount.labels [object] Additional labels to be included on the service account + ## + labels: {} + +## @section etcd "pre-upgrade" K8s Job parameters +## +preUpgradeJob: + ## @param preUpgradeJob.enabled Enable running a pre-upgrade job on Helm upgrades that removes obsolete members + ## + enabled: true + ## @param preUpgradeJob.annotations [object] Add annotations to the etcd "pre-upgrade" job + ## + annotations: {} + ## @param preUpgradeJob.podLabels Additional pod labels for etcd "pre-upgrade" job + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param preUpgradeJob.podAnnotations Additional pod annotations for etcd "pre-upgrade" job + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param preUpgradeJob.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param preUpgradeJob.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param preUpgradeJob.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param preUpgradeJob.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. + ## @param preUpgradeJob.nodeAffinityPreset.values [array] Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param preUpgradeJob.affinity [object] Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param preUpgradeJob.nodeSelector [object] Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param preUpgradeJob.tolerations [array] Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## Configure "pre-upgrade" job's container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## 
@param preUpgradeJob.containerSecurityContext.enabled Enabled "pre-upgrade" job's containers' Security Context + ## @param preUpgradeJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "pre-upgrade" job's containers + ## @param preUpgradeJob.containerSecurityContext.runAsUser Set runAsUser in "pre-upgrade" job's containers' Security Context + ## @param preUpgradeJob.containerSecurityContext.runAsGroup Set runAsUser in "pre-upgrade" job's containers' Security Context + ## @param preUpgradeJob.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "pre-upgrade" job's containers' Security Context + ## @param preUpgradeJob.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "pre-upgrade" job's containers' Security Context + ## @param preUpgradeJob.containerSecurityContext.privileged Set privileged in "pre-upgrade" job's containers' Security Context + ## @param preUpgradeJob.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "pre-upgrade" job's containers' Security Context + ## @param preUpgradeJob.containerSecurityContext.capabilities.add List of capabilities to be added in "pre-upgrade" job's containers + ## @param preUpgradeJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "pre-upgrade" job's containers + ## @param preUpgradeJob.containerSecurityContext.seccompProfile.type Set seccomp profile in "pre-upgrade" job's containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure "pre-upgrade" job's pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param 
preUpgradeJob.podSecurityContext.enabled Enabled "pre-upgrade" job's pods' Security Context + ## @param preUpgradeJob.podSecurityContext.fsGroupChangePolicy Set fsGroupChangePolicy in "pre-upgrade" job's pods' Security Context + ## @param preUpgradeJob.podSecurityContext.sysctls List of sysctls to allow in "pre-upgrade" job's pods' Security Context + ## @param preUpgradeJob.podSecurityContext.supplementalGroups List of supplemental groups to add to "pre-upgrade" job's pods' Security Context + ## @param preUpgradeJob.podSecurityContext.fsGroup Set fsGroup in "pre-upgrade" job's pods' Security Context + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## etcd "pre-upgrade" job's container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param preUpgradeJob.resourcesPreset Set etcd "pre-upgrade" job's container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if preUpgradeJob.resources is set (preUpgradeJob.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "micro" + ## @param preUpgradeJob.resources Set etcd "pre-upgrade" job's container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## etcd "pre-upgrade" job's optional delay + ## @param preUpgradeJob.startDelay Optional delay before starting the pre-upgrade hook (in seconds). + startDelay: "" + +## @section Defragmentation parameters +## + +## Enable defragmentation by periodically rearranging fragmented data after history compaction. 
+## It creates a cronjob to periodically run the defragmentation command: +## etcdctl defrag [OPTIONS] +## See https://etcd.io/docs/latest/op-guide/maintenance/ +## +defrag: + ## @param defrag.enabled Enable automatic defragmentation. This is most effective when paired with auto compaction: consider setting "autoCompactionRetention > 0". + ## + enabled: false + cronjob: + ## @param defrag.cronjob.startingDeadlineSeconds Number of seconds representing the deadline for starting the job if it misses scheduled time for any reason + ## + startingDeadlineSeconds: "" + ## @param defrag.cronjob.schedule Schedule in Cron format to defrag (daily at midnight by default) + ## See https://en.wikipedia.org/wiki/Cron + ## + schedule: "0 0 * * *" + ## @param defrag.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy + ## + concurrencyPolicy: Forbid + ## @param defrag.cronjob.suspend Boolean that indicates if the controller must suspend subsequent executions (not applied to already started executions) + ## + suspend: false + ## @param defrag.cronjob.successfulJobsHistoryLimit Number of successful finished jobs to retain + ## + successfulJobsHistoryLimit: 1 + ## @param defrag.cronjob.failedJobsHistoryLimit Number of failed finished jobs to retain + ## + failedJobsHistoryLimit: 1 + ## @param defrag.cronjob.labels [object] Additional labels to be added to the Defrag cronjob + ## + labels: {} + ## @param defrag.cronjob.annotations [object] Annotations to be added to the Defrag cronjob + ## + annotations: {} + ## @param defrag.cronjob.activeDeadlineSeconds Number of seconds relative to the startTime that the job may be continuously active before the system tries to terminate it + ## + activeDeadlineSeconds: "" + ## @param defrag.cronjob.restartPolicy Set the cronjob parameter restartPolicy + ## + restartPolicy: OnFailure + ## @param defrag.cronjob.podLabels [object] Labels that will be added to pods created by Defrag cronjob + ## + podLabels: {} + ## @param 
defrag.cronjob.podAnnotations [object] Pod annotations for Defrag cronjob pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## K8s Security Context for Defrag cronjob pods + ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param defrag.cronjob.podSecurityContext.enabled Enable security context for Defrag pods + ## @param defrag.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param defrag.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param defrag.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param defrag.cronjob.podSecurityContext.fsGroup Group ID for the Defrag filesystem + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure container security context for Defrag cronjob containers + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param defrag.cronjob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param defrag.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param defrag.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param defrag.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param defrag.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param defrag.cronjob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param defrag.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param defrag.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security 
Context allowPrivilegeEscalation + ## @param defrag.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param defrag.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param defrag.cronjob.nodeSelector [object] Node labels for pod assignment in Defrag cronjob + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param defrag.cronjob.tolerations [array] Tolerations for pod assignment in Defrag cronjob + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param defrag.cronjob.serviceAccountName Specifies the service account to use for Defrag cronjob + ## + serviceAccountName: "" + ## @param defrag.cronjob.command [array] Override default container command for defragmentation (useful when using custom images) + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + command: [] + ## @param defrag.cronjob.args [array] Override default container args (useful when using custom images) + ## + args: [] + ## @param defrag.cronjob.resourcesPreset Set container resources according to one common preset + ## (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if + ## defrag.cronjob.resources is set (defrag.cronjob.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defrag.cronjob.resources [object] Set container requests and limits for different resources like CPU or + ## memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param defrag.cronjob.extraEnvVars [array] Extra environment variables to be set on defrag cronjob container + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param defrag.cronjob.extraEnvVarsCM Name of existing ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param defrag.cronjob.extraEnvVarsSecret Name of existing Secret containing extra env vars + ## + extraEnvVarsSecret: "" + +## @section Other parameters +## + +## etcd Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: true + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 51% + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" diff --git a/addons/etcd/3.6/meta.yaml b/addons/etcd/3.6/meta.yaml new file mode 100644 index 00000000..c6f1a95d --- /dev/null +++ b/addons/etcd/3.6/meta.yaml @@ -0,0 +1,63 @@ +name: etcd-3.6 +version: "3.6" +id: 74c7112f-bd4a-4ecf-bfec-ca9443b4815a +description: "A distributed, reliable key-value store for the most critical data of a distributed system" +displayName: "etcd-3.6" +metadata: + displayName: "etcd-3.6" + provider: + name: drycc + supportURL: https://etcd.io/ + documentationURL: https://etcd.io/ +tags: etcd +bindable: true +instances_retrievable: true +bindings_retrievable: true 
+plan_updateable: true +allow_parameters: +- name: "extraDeploy" + required: false + description: "extraDeploy config for values.yaml" +- name: "nodeSelector" + required: false + description: "nodeSelector config for values.yaml" +- name: "auth" + required: false + description: "auth config for values.yaml" +- name: "maxProcs" + required: false + description: "maxProcs config for values.yaml" +- name: "configuration" + required: false + description: "configuration config for values.yaml" +- name: "extraEnvVars" + required: false + description: "extraEnvVars config for values.yaml" +- name: "externalTrafficPolicy" + required: false + description: "externalTrafficPolicy config for values.yaml" +- name: "persistence" + required: false + description: "persistence config for values.yaml" +- name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" +- name: "startFromSnapshot" + required: false + description: "startFromSnapshot config for values.yaml" +- name: "disasterRecovery" + required: false + description: "disasterRecovery config for values.yaml" +- name: "preUpgradeJob" + required: false + description: "preUpgradeJob config for values.yaml" +- name: "defrag" + required: false + description: "defrag config for values.yaml" +- name: "service.type" + required: false + description: "service type config for values.yaml" +- name: "metrics.enabled" + required: false + description: "metrics enabled or not config for values.yaml" +archive: false diff --git a/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml b/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml new file mode 100644 index 00000000..ab394d63 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml @@ -0,0 +1,42 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="client")].port }' + + - name: PEER_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' + + {{- if .Values.auth.create }} + - name: USER + value: root + + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + jsonpath: '{ .data.etcd-root-password }' + {{- end }} + + {{- end }} diff --git a/addons/etcd/3.6/plans/standard-16c32g3w/instance-schema.json b/addons/etcd/3.6/plans/standard-16c32g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-16c32g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/etcd/3.6/plans/standard-16c32g3w/meta.yaml b/addons/etcd/3.6/plans/standard-16c32g3w/meta.yaml new file mode 100644 index 00000000..91186ad1 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-16c32g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c32g3w" +id: 0be06588-90fa-4f4c-bdd2-94af3828a04f +description: "etcd standard-16c32g3w plan which limit resources 16 cores 32G memory and 3 workers." 
+displayName: "standard-16c32g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/etcd/3.6/plans/standard-16c32g3w/values.yaml b/addons/etcd/3.6/plans/standard-16c32g3w/values.yaml new file mode 100644 index 00000000..caa2b8ff --- /dev/null +++ b/addons/etcd/3.6/plans/standard-16c32g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override etcd.fullname template +## +fullnameOverride: hb-etcd-standard-16c32g + +## etcd containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for etcd containers +## @param resources.requests The requested resources for etcd containers +## +resources: + limits: + cpu: 16 + memory: 32Gi + requests: + cpu: 2 + memory: 4Gi + +## @param replicaCount Number of etcd replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable etcd data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for etcd data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for etcd data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for etcd data volume + ## If you change this value, you might have to adjust `etcd.diskFreeLimit` as well + ## + size: 64Gi diff --git a/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml b/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml new file mode 100644 index 00000000..ab394d63 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml @@ -0,0 +1,42 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="client")].port }' + + - name: PEER_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' + + {{- if .Values.auth.create }} + - name: USER + value: root + + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . 
}} + jsonpath: '{ .data.etcd-root-password }' + {{- end }} + + {{- end }} diff --git a/addons/etcd/3.6/plans/standard-1c2g3w/instance-schema.json b/addons/etcd/3.6/plans/standard-1c2g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-1c2g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/etcd/3.6/plans/standard-1c2g3w/meta.yaml b/addons/etcd/3.6/plans/standard-1c2g3w/meta.yaml new file mode 100644 index 00000000..6ad64d6c --- /dev/null +++ b/addons/etcd/3.6/plans/standard-1c2g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c2g3w" +id: becad715-e953-45d1-85eb-c1360e09d830 +description: "etcd standard-1c2g3w plan which limit resources 1 core 2G memory and 3 workers." +displayName: "standard-1c2g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/etcd/3.6/plans/standard-1c2g3w/values.yaml b/addons/etcd/3.6/plans/standard-1c2g3w/values.yaml new file mode 100644 index 00000000..8de417e8 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-1c2g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override etcd.fullname template +## +fullnameOverride: hb-etcd-standard-1c2g + +## etcd containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for etcd containers +## @param resources.requests The requested resources for etcd containers +## +resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + +## @param replicaCount Number of etcd replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable etcd data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for etcd data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for etcd data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for etcd data volume + ## If you change this value, you might have to adjust `etcd.diskFreeLimit` as well + ## + size: 5Gi diff --git a/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml b/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml new file mode 100644 index 00000000..ab394d63 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml @@ -0,0 +1,42 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.targetPort=="client")].port }' + + - name: PEER_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' + + {{- if .Values.auth.create }} + - name: USER + value: root + + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + jsonpath: '{ .data.etcd-root-password }' + {{- end }} + + {{- end }} diff --git a/addons/etcd/3.6/plans/standard-2c4g3w/instance-schema.json b/addons/etcd/3.6/plans/standard-2c4g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-2c4g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/etcd/3.6/plans/standard-2c4g3w/meta.yaml b/addons/etcd/3.6/plans/standard-2c4g3w/meta.yaml new file mode 100644 index 00000000..4ab53ac9 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-2c4g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g3w" +id: a944eb9d-acdc-4777-ad76-baef535fd895 +description: "etcd standard-2c4g3w plan which limit resources 2 cores 4G memory and 3 workers." 
+displayName: "standard-2c4g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/etcd/3.6/plans/standard-2c4g3w/values.yaml b/addons/etcd/3.6/plans/standard-2c4g3w/values.yaml new file mode 100644 index 00000000..dddc5235 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-2c4g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override etcd.fullname template +## +fullnameOverride: hb-etcd-standard-2c4g + +## etcd containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for etcd containers +## @param resources.requests The requested resources for etcd containers +## +resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 500m + memory: 512Mi + +## @param replicaCount Number of etcd replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable etcd data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for etcd data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for etcd data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for etcd data volume + ## If you change this value, you might have to adjust `etcd.diskFreeLimit` as well + ## + size: 8Gi diff --git a/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml b/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml new file mode 100644 index 00000000..ab394d63 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml @@ -0,0 +1,42 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="client")].port }' + + - name: PEER_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' + + {{- if .Values.auth.create }} + - name: USER + value: root + + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . 
}} + jsonpath: '{ .data.etcd-root-password }' + {{- end }} + + {{- end }} diff --git a/addons/etcd/3.6/plans/standard-4c8g3w/instance-schema.json b/addons/etcd/3.6/plans/standard-4c8g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-4c8g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/etcd/3.6/plans/standard-4c8g3w/meta.yaml b/addons/etcd/3.6/plans/standard-4c8g3w/meta.yaml new file mode 100644 index 00000000..9336cf84 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-4c8g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g3w" +id: b0b8be2e-f1d6-405b-a526-be2c1c004987 +description: "etcd standard-4c8g3w plan which limit resources 4 cores 8G memory and 3 workers." +displayName: "standard-4c8g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/etcd/3.6/plans/standard-4c8g3w/values.yaml b/addons/etcd/3.6/plans/standard-4c8g3w/values.yaml new file mode 100644 index 00000000..659b1e96 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-4c8g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override etcd.fullname template +## +fullnameOverride: hb-etcd-standard-4c8g + +## etcd containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for etcd containers +## @param resources.requests The requested resources for etcd containers +## +resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + +## @param replicaCount Number of etcd replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable etcd data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for etcd data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for etcd data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for etcd data volume + ## If you change this value, you might have to adjust `etcd.diskFreeLimit` as well + ## + size: 16Gi diff --git a/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml b/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml new file mode 100644 index 00000000..ab394d63 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml @@ -0,0 +1,42 @@ +credential: + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: DOMAIN + value: {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.targetPort=="client")].port }' + + - name: PEER_PORT + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' + + {{- if .Values.auth.create }} + - name: USER + value: root + + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + jsonpath: '{ .data.etcd-root-password }' + {{- end }} + + {{- end }} diff --git a/addons/etcd/3.6/plans/standard-8c16g3w/instance-schema.json b/addons/etcd/3.6/plans/standard-8c16g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-8c16g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/etcd/3.6/plans/standard-8c16g3w/meta.yaml b/addons/etcd/3.6/plans/standard-8c16g3w/meta.yaml new file mode 100644 index 00000000..8ce82b2a --- /dev/null +++ b/addons/etcd/3.6/plans/standard-8c16g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c16g3w" +id: e0c7ee78-b841-449b-9681-ad10912bbaa2 +description: "etcd standard-8c16g3w plan which limit resources 8 cores 16G memory and 3 workers." 
+displayName: "standard-8c16g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/etcd/3.6/plans/standard-8c16g3w/values.yaml b/addons/etcd/3.6/plans/standard-8c16g3w/values.yaml new file mode 100644 index 00000000..34da9f15 --- /dev/null +++ b/addons/etcd/3.6/plans/standard-8c16g3w/values.yaml @@ -0,0 +1,48 @@ +## @param fullnameOverride String to fully override etcd.fullname template +## +fullnameOverride: hb-etcd-standard-8c16g + +## etcd containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for etcd containers +## @param resources.requests The requested resources for etcd containers +## +resources: + limits: + cpu: 8 + memory: 16Gi + requests: + cpu: 1 + memory: 2Gi + +## @param replicaCount Number of etcd replicas to deploy +## +replicaCount: 3 + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable etcd data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for etcd data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access Modes for etcd data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for etcd data volume + ## If you change this value, you might have to adjust `etcd.diskFreeLimit` as well + ## + size: 32Gi diff --git a/addons/index.yaml b/addons/index.yaml index 2e8e5ea4..8a4ed8df 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -87,3 +87,6 @@ entries: victoriametrics: - version: "1" description: "VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It is designed to handle large amounts of data with high performance and low resource usage." + etcd: + - version: "3.6" + description: "A distributed, reliable key-value store for the most critical data of a distributed system." From f33300e1f19909f7527928240ac710ee4e440659 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Mon, 22 Dec 2025 10:44:04 +0800 Subject: [PATCH 81/93] chore(etcd): update etcd bind --- addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml | 2 -- addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml | 1 - addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml | 1 - addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml | 2 -- addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml | 2 -- 5 files changed, 8 deletions(-) diff --git a/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml b/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml index ab394d63..d027b433 100644 --- a/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml @@ -38,5 +38,3 @@ credential: name: {{ include "etcd.secretName" . 
}} jsonpath: '{ .data.etcd-root-password }' {{- end }} - - {{- end }} diff --git a/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml b/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml index ab394d63..f3822438 100644 --- a/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml @@ -39,4 +39,3 @@ credential: jsonpath: '{ .data.etcd-root-password }' {{- end }} - {{- end }} diff --git a/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml b/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml index ab394d63..f3822438 100644 --- a/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml @@ -39,4 +39,3 @@ credential: jsonpath: '{ .data.etcd-root-password }' {{- end }} - {{- end }} diff --git a/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml b/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml index ab394d63..d027b433 100644 --- a/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml @@ -38,5 +38,3 @@ credential: name: {{ include "etcd.secretName" . }} jsonpath: '{ .data.etcd-root-password }' {{- end }} - - {{- end }} diff --git a/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml b/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml index ab394d63..d027b433 100644 --- a/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml @@ -38,5 +38,3 @@ credential: name: {{ include "etcd.secretName" . 
}} jsonpath: '{ .data.etcd-root-password }' {{- end }} - - {{- end }} From 071644bf9c9e9454ada17bb4d4cbae2a0a31496b Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 25 Dec 2025 10:40:24 +0800 Subject: [PATCH 82/93] chore(etcd): update etcd bind --- addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml | 3 ++- addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml | 2 +- addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml | 2 +- addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml | 3 ++- addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml | 3 ++- 5 files changed, 8 insertions(+), 5 deletions(-) diff --git a/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml b/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml index d027b433..d034760f 100644 --- a/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-16c32g3w/bind.yaml @@ -28,7 +28,7 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' - {{- if .Values.auth.create }} + {{- if .Values.auth.rbac.create }} - name: USER value: root @@ -38,3 +38,4 @@ credential: name: {{ include "etcd.secretName" . }} jsonpath: '{ .data.etcd-root-password }' {{- end }} + diff --git a/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml b/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml index f3822438..d034760f 100644 --- a/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-1c2g3w/bind.yaml @@ -28,7 +28,7 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' - {{- if .Values.auth.create }} + {{- if .Values.auth.rbac.create }} - name: USER value: root diff --git a/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml b/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml index f3822438..d034760f 100644 --- a/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-2c4g3w/bind.yaml @@ -28,7 +28,7 @@ credential: name: {{ include "common.names.fullname" . 
}} jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' - {{- if .Values.auth.create }} + {{- if .Values.auth.rbac.create }} - name: USER value: root diff --git a/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml b/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml index d027b433..d034760f 100644 --- a/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-4c8g3w/bind.yaml @@ -28,7 +28,7 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' - {{- if .Values.auth.create }} + {{- if .Values.auth.rbac.create }} - name: USER value: root @@ -38,3 +38,4 @@ credential: name: {{ include "etcd.secretName" . }} jsonpath: '{ .data.etcd-root-password }' {{- end }} + diff --git a/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml b/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml index d027b433..d034760f 100644 --- a/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml +++ b/addons/etcd/3.6/plans/standard-8c16g3w/bind.yaml @@ -28,7 +28,7 @@ credential: name: {{ include "common.names.fullname" . }} jsonpath: '{ .spec.ports[?(@.targetPort=="peer")].port }' - {{- if .Values.auth.create }} + {{- if .Values.auth.rbac.create }} - name: USER value: root @@ -38,3 +38,4 @@ credential: name: {{ include "etcd.secretName" . 
}} jsonpath: '{ .data.etcd-root-password }' {{- end }} + From ba410ee51a9e5dc04cdadeecfda8ec80e23602b2 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Wed, 7 Jan 2026 15:24:03 +0800 Subject: [PATCH 83/93] fix(kafka): plans limit --- addons/kafka/3.6/plans/standard-1c2g3w/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml index 10e61b73..2fef60fe 100644 --- a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml @@ -18,7 +18,7 @@ controller: memory: 2Gi requests: cpu: 200m - memory: 256Gi + memory: 256Mi ## Enable persistence using Persistent Volume Claims ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## From d4001ebb751d30e318fe21a2b38c6de93c704983 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Wed, 7 Jan 2026 16:12:24 +0800 Subject: [PATCH 84/93] chore(kafka): bind info --- addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml | 2 +- addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml | 2 +- addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml | 2 +- addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml | 2 +- addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml | 2 +- addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml b/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml index b3b38988..2dfefe03 100644 --- a/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-16c32g3w/bind.yaml @@ -32,7 +32,7 @@ credential: valueFrom: serviceRef: name: {{ template "common.names.fullname" . 
}} - jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' {{- end }} {{- $brokerList := list }} diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml b/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml index b3b38988..2dfefe03 100644 --- a/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-1c2g3w/bind.yaml @@ -32,7 +32,7 @@ credential: valueFrom: serviceRef: name: {{ template "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' {{- end }} {{- $brokerList := list }} diff --git a/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml b/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml index b3b38988..2dfefe03 100644 --- a/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-24c64g3w/bind.yaml @@ -32,7 +32,7 @@ credential: valueFrom: serviceRef: name: {{ template "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' {{- end }} {{- $brokerList := list }} diff --git a/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml b/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml index b3b38988..2dfefe03 100644 --- a/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-2c4g3w/bind.yaml @@ -32,7 +32,7 @@ credential: valueFrom: serviceRef: name: {{ template "common.names.fullname" . 
}} - jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' {{- end }} {{- $brokerList := list }} diff --git a/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml b/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml index b3b38988..2dfefe03 100644 --- a/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-4c8g3w/bind.yaml @@ -32,7 +32,7 @@ credential: valueFrom: serviceRef: name: {{ template "common.names.fullname" . }} - jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' {{- end }} {{- $brokerList := list }} diff --git a/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml b/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml index b3b38988..2dfefe03 100644 --- a/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml +++ b/addons/kafka/3.6/plans/standard-8c16g3w/bind.yaml @@ -32,7 +32,7 @@ credential: valueFrom: serviceRef: name: {{ template "common.names.fullname" . 
}} - jsonpath: '{ .spec.ports[?(@.name=="tcp-external")].port }' + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' {{- end }} {{- $brokerList := list }} From 4fff398537aea652b78bd31b07b2de6c34ec878d Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 2 Feb 2026 14:46:53 +0800 Subject: [PATCH 85/93] fix(postgresql-cluster): fix typo (#124) --- addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml | 2 +- addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml | 2 +- addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml | 2 +- .../postgresql-cluster/15/plans/standard-32c128g800/bind.yaml | 2 +- .../postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml | 2 +- addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml | 2 +- addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml | 2 +- addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml | 2 +- addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml | 2 +- addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml | 2 +- .../postgresql-cluster/16/plans/standard-32c128g800/bind.yaml | 2 +- .../postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml | 2 +- addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml | 2 +- addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml | 2 +- addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml | 2 +- addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml | 2 +- addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml | 2 +- .../postgresql-cluster/17/plans/standard-32c128g800/bind.yaml | 2 +- .../postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml | 2 +- addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml | 2 +- addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml | 2 +- 21 files changed, 21 insertions(+), 21 deletions(-) diff --git a/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml 
b/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-16c64g400/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml b/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c4g20/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml b/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-2c8g50/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml b/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c128g800/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml b/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml index 
5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-32c64g4000/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml b/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-4c16g100/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml b/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml +++ b/addons/postgresql-cluster/15/plans/standard-8c32g200/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml b/addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml +++ b/addons/postgresql-cluster/16/plans/standard-16c64g400/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml b/addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml index 5fc257f9..55955ed6 100644 --- 
a/addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml +++ b/addons/postgresql-cluster/16/plans/standard-2c4g20/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml b/addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml +++ b/addons/postgresql-cluster/16/plans/standard-2c8g50/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml b/addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml +++ b/addons/postgresql-cluster/16/plans/standard-32c128g800/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml b/addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml +++ b/addons/postgresql-cluster/16/plans/standard-32c64g4000/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml b/addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml +++ 
b/addons/postgresql-cluster/16/plans/standard-4c16g100/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml b/addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml +++ b/addons/postgresql-cluster/16/plans/standard-8c32g200/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml b/addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml +++ b/addons/postgresql-cluster/17/plans/standard-16c64g400/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml b/addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml +++ b/addons/postgresql-cluster/17/plans/standard-2c4g20/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml b/addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml +++ b/addons/postgresql-cluster/17/plans/standard-2c8g50/bind.yaml @@ -37,5 +37,5 @@ 
credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml b/addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml +++ b/addons/postgresql-cluster/17/plans/standard-32c128g800/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml b/addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml +++ b/addons/postgresql-cluster/17/plans/standard-32c64g4000/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml b/addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml +++ b/addons/postgresql-cluster/17/plans/standard-4c16g100/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml b/addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml index 5fc257f9..55955ed6 100644 --- a/addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml +++ b/addons/postgresql-cluster/17/plans/standard-8c32g200/bind.yaml @@ -37,5 +37,5 @@ credential: jsonpath: '{ .data.admin-user }' - name: PORT 
value: 5432 - - name: DADABASE + - name: DATABASE value: postgres \ No newline at end of file From b0eeee2fb6223864d05e9bdc9ce0e1d774fb1039 Mon Sep 17 00:00:00 2001 From: Eamon Date: Mon, 2 Feb 2026 15:07:39 +0800 Subject: [PATCH 86/93] chore(mongodb): change pull policy to IfNotPresent --- addons/mongodb/7.0/chart/mongodb/values.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/addons/mongodb/7.0/chart/mongodb/values.yaml b/addons/mongodb/7.0/chart/mongodb/values.yaml index ac06f422..0f9e27e6 100644 --- a/addons/mongodb/7.0/chart/mongodb/values.yaml +++ b/addons/mongodb/7.0/chart/mongodb/values.yaml @@ -111,7 +111,7 @@ image: ## Specify a imagePullPolicy ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## - pullPolicy: Always + pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ @@ -494,7 +494,7 @@ podSecurityContext: ## @param containerSecurityContext.enabled Enable MongoDB(®) container(s)' Security Context ## @param containerSecurityContext.runAsUser User ID for the MongoDB(®) container ## @param containerSecurityContext.runAsNonRoot Set MongoDB(®) container's Security Context runAsNonRoot -## +## containerSecurityContext: enabled: true runAsUser: 1001 @@ -1516,7 +1516,7 @@ hidden: ## @param hidden.terminationGracePeriodSeconds Hidden Termination Grace Period ## terminationGracePeriodSeconds: "" - ## @param hidden.updateStrategy.type Strategy that will be employed to update Pods in the StatefulSet + ## @param hidden.updateStrategy.type Strategy that will be employed to update Pods in the StatefulSet ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies ## updateStrategy: ## type: RollingUpdate @@ -1873,10 +1873,10 @@ metrics: ## @param metrics.resources.requests The requested resources for 
Prometheus exporter containers ## resources: - limits: + limits: cpu: 500m memory: 512Mi - requests: + requests: cpu: 100m memory: 128Mi ## @param metrics.containerPort Port of the Prometheus metrics container @@ -2036,7 +2036,7 @@ metrics: networkPolicy: ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources ## - enabled: true + enabled: true ## @param networkPolicy.allowExternal The Policy model to apply. ## When set to false, only pods with the correct ## client label will have network access to the port Mongodb is listening From e56cc8b135733cfccd5c92f18876c5fd92d8e45f Mon Sep 17 00:00:00 2001 From: Eamon Date: Tue, 3 Feb 2026 09:26:38 +0800 Subject: [PATCH 87/93] chore(mongodb): add mongodb 8.0 --- addons/index.yaml | 3 + .../mongodb/8.0/chart/mongodb-8.0/.helmignore | 21 + .../mongodb/8.0/chart/mongodb-8.0/Chart.yaml | 29 + .../mongodb/8.0/chart/mongodb-8.0/README.md | 787 +++++++ .../8.0/chart/mongodb-8.0/templates/NOTES.txt | 202 ++ .../chart/mongodb-8.0/templates/_helpers.tpl | 432 ++++ .../templates/arbiter/configmap.yaml | 18 + .../templates/arbiter/headless-svc.yaml | 33 + .../mongodb-8.0/templates/arbiter/pdb.yaml | 25 + .../templates/arbiter/statefulset.yaml | 279 +++ .../templates/common-scripts-cm.yaml | 104 + .../mongodb-8.0/templates/configmap.yaml | 18 + .../mongodb-8.0/templates/extra-list.yaml | 4 + .../templates/hidden/configmap.yaml | 15 + .../templates/hidden/external-access-svc.yaml | 67 + .../templates/hidden/headless-svc.yaml | 34 + .../mongodb-8.0/templates/hidden/pdb.yaml | 22 + .../templates/hidden/statefulset.yaml | 533 +++++ .../templates/initialization-configmap.yaml | 17 + .../mongodb-8.0/templates/metrics-svc.yaml | 33 + .../mongodb-8.0/templates/networkpolicy.yaml | 45 + .../mongodb-8.0/templates/prometheusrule.yaml | 18 + .../8.0/chart/mongodb-8.0/templates/psp.yaml | 50 + .../templates/replicaset/access-svc.yaml | 32 + .../replicaset/external-access-svc.yaml | 67 + .../templates/replicaset/headless-svc.yaml 
| 34 + .../mongodb-8.0/templates/replicaset/pdb.yaml | 25 + .../replicaset/scripts-configmap.yaml | 301 +++ .../templates/replicaset/statefulset.yaml | 543 +++++ .../mongodb-8.0/templates/replicaset/svc.yaml | 43 + .../8.0/chart/mongodb-8.0/templates/role.yaml | 30 + .../mongodb-8.0/templates/rolebinding.yaml | 19 + .../mongodb-8.0/templates/secrets-ca.yaml | 37 + .../chart/mongodb-8.0/templates/secrets.yaml | 41 + .../mongodb-8.0/templates/serviceaccount.yaml | 23 + .../mongodb-8.0/templates/servicemonitor.yaml | 48 + .../templates/standalone/dep-sts.yaml | 474 ++++ .../mongodb-8.0/templates/standalone/pvc.yaml | 33 + .../mongodb-8.0/templates/standalone/svc.yaml | 58 + .../8.0/chart/mongodb-8.0/values.schema.json | 173 ++ .../mongodb/8.0/chart/mongodb-8.0/values.yaml | 2048 +++++++++++++++++ addons/mongodb/8.0/meta.yaml | 24 + .../8.0/plans/standard-16c64g400/bind.yaml | 36 + .../standard-16c64g400/instance-schema.json | 12 + .../8.0/plans/standard-16c64g400/meta.yaml | 6 + .../8.0/plans/standard-16c64g400/values.yaml | 23 + .../8.0/plans/standard-1c2g10/bind.yaml | 36 + .../standard-1c2g10/instance-schema.json | 12 + .../8.0/plans/standard-1c2g10/meta.yaml | 6 + .../8.0/plans/standard-1c2g10/values.yaml | 23 + .../8.0/plans/standard-2c4g20/bind.yaml | 36 + .../standard-2c4g20/instance-schema.json | 12 + .../8.0/plans/standard-2c4g20/meta.yaml | 6 + .../8.0/plans/standard-2c4g20/values.yaml | 23 + .../8.0/plans/standard-2c8g50/bind.yaml | 36 + .../standard-2c8g50/instance-schema.json | 12 + .../8.0/plans/standard-2c8g50/meta.yaml | 6 + .../8.0/plans/standard-2c8g50/values.yaml | 23 + .../8.0/plans/standard-32c128g800/bind.yaml | 36 + .../standard-32c128g800/instance-schema.json | 12 + .../8.0/plans/standard-32c128g800/meta.yaml | 6 + .../8.0/plans/standard-32c128g800/values.yaml | 23 + .../8.0/plans/standard-4c16g100/bind.yaml | 36 + .../standard-4c16g100/instance-schema.json | 12 + .../8.0/plans/standard-4c16g100/meta.yaml | 6 + 
.../8.0/plans/standard-4c16g100/values.yaml | 23 + .../8.0/plans/standard-8c32g200/bind.yaml | 36 + .../standard-8c32g200/instance-schema.json | 12 + .../8.0/plans/standard-8c32g200/meta.yaml | 6 + .../8.0/plans/standard-8c32g200/values.yaml | 23 + 70 files changed, 7381 insertions(+) create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/.helmignore create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/Chart.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/README.md create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/NOTES.txt create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/_helpers.tpl create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/configmap.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/headless-svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/pdb.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/statefulset.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/common-scripts-cm.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/configmap.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/extra-list.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/configmap.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/external-access-svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/headless-svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/pdb.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/statefulset.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/initialization-configmap.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/metrics-svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/networkpolicy.yaml create mode 100644 
addons/mongodb/8.0/chart/mongodb-8.0/templates/prometheusrule.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/psp.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/access-svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/external-access-svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/headless-svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/pdb.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/scripts-configmap.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/statefulset.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/role.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/rolebinding.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets-ca.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/serviceaccount.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/servicemonitor.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/dep-sts.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/pvc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/svc.yaml create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/values.schema.json create mode 100644 addons/mongodb/8.0/chart/mongodb-8.0/values.yaml create mode 100644 addons/mongodb/8.0/meta.yaml create mode 100644 addons/mongodb/8.0/plans/standard-16c64g400/bind.yaml create mode 100644 addons/mongodb/8.0/plans/standard-16c64g400/instance-schema.json create mode 100644 addons/mongodb/8.0/plans/standard-16c64g400/meta.yaml create mode 100644 
addons/mongodb/8.0/plans/standard-16c64g400/values.yaml create mode 100644 addons/mongodb/8.0/plans/standard-1c2g10/bind.yaml create mode 100644 addons/mongodb/8.0/plans/standard-1c2g10/instance-schema.json create mode 100644 addons/mongodb/8.0/plans/standard-1c2g10/meta.yaml create mode 100644 addons/mongodb/8.0/plans/standard-1c2g10/values.yaml create mode 100644 addons/mongodb/8.0/plans/standard-2c4g20/bind.yaml create mode 100644 addons/mongodb/8.0/plans/standard-2c4g20/instance-schema.json create mode 100644 addons/mongodb/8.0/plans/standard-2c4g20/meta.yaml create mode 100644 addons/mongodb/8.0/plans/standard-2c4g20/values.yaml create mode 100644 addons/mongodb/8.0/plans/standard-2c8g50/bind.yaml create mode 100644 addons/mongodb/8.0/plans/standard-2c8g50/instance-schema.json create mode 100644 addons/mongodb/8.0/plans/standard-2c8g50/meta.yaml create mode 100644 addons/mongodb/8.0/plans/standard-2c8g50/values.yaml create mode 100644 addons/mongodb/8.0/plans/standard-32c128g800/bind.yaml create mode 100644 addons/mongodb/8.0/plans/standard-32c128g800/instance-schema.json create mode 100644 addons/mongodb/8.0/plans/standard-32c128g800/meta.yaml create mode 100644 addons/mongodb/8.0/plans/standard-32c128g800/values.yaml create mode 100644 addons/mongodb/8.0/plans/standard-4c16g100/bind.yaml create mode 100644 addons/mongodb/8.0/plans/standard-4c16g100/instance-schema.json create mode 100644 addons/mongodb/8.0/plans/standard-4c16g100/meta.yaml create mode 100644 addons/mongodb/8.0/plans/standard-4c16g100/values.yaml create mode 100644 addons/mongodb/8.0/plans/standard-8c32g200/bind.yaml create mode 100644 addons/mongodb/8.0/plans/standard-8c32g200/instance-schema.json create mode 100644 addons/mongodb/8.0/plans/standard-8c32g200/meta.yaml create mode 100644 addons/mongodb/8.0/plans/standard-8c32g200/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index 8a4ed8df..cdbe36d5 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -67,6 +67,9 @@ 
entries: mongodb: - version: 7.0 description: "MongoDB is a document database designed for ease of application development and scaling." + mongodb: + - version: 8.0 + description: "MongoDB is a document database designed for ease of application development and scaling." clickhouse: - version: 24 description: "ClickHouse is the fastest and most resource efficient open-source database for real-time apps and analytics." diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/.helmignore b/addons/mongodb/8.0/chart/mongodb-8.0/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/Chart.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/Chart.yaml new file mode 100644 index 00000000..772e2a69 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 8.0.17 +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + tags: + - drycc-common + version: ~1.1.2 +description: MongoDB(R) is a relational open source NoSQL database. Easy to use, it stores data in JSON-like documents. Automated scalability and high-performance. Ideal for developing cloud native applications. 
+engine: gotpl +home: https://github.com/drycc/charts/tree/master/drycc/mongodb +icon: https://drycc.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: + - mongodb + - database + - nosql + - cluster + - replicaset + - replication +maintainers: + - name: Drycc + url: https://github.com/drycc/charts +name: mongodb +sources: + - https://github.com/drycc/containers/tree/main/drycc/mongodb + - https://mongodb.org +version: 13.1.7 diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/README.md b/addons/mongodb/8.0/chart/mongodb-8.0/README.md new file mode 100644 index 00000000..5a83118a --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/README.md @@ -0,0 +1,787 @@ + + +# MongoDB(R) packaged by Drycc + +MongoDB(R) is a relational open source NoSQL database. Easy to use, it stores data in JSON-like documents. Automated scalability and high-performance. Ideal for developing cloud native applications. + +[Overview of MongoDB®](http://www.mongodb.org) + +Disclaimer: The respective trademarks mentioned in the offering are owned by the respective companies. We do not provide a commercial license for any of these products. This listing has an open-source license. MongoDB(R) is run and maintained by MongoDB, which is a completely separate project from Drycc. + +## TL;DR + +```bash +$ helm repo add my-repo https://charts.drycc.com/drycc +$ helm install my-release my-repo/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB(®)](https://github.com/drycc/containers/tree/main/drycc/mongodb) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Drycc charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release my-repo/mongodb +``` + +The command deploys MongoDB(®) on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Architecture + +This chart allows installing MongoDB(®) using two different architecture setups: `standalone` or `replicaset`. Use the `architecture` parameter to choose the one to use: + +```console +architecture="standalone" +architecture="replicaset" +``` + +Refer to the [chart documentation for more information on each of these architectures](https://docs.drycc.com/kubernetes/infrastructure/mongodb/get-started/understand-architecture/). 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| -------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.namespaceOverride` | Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override mongodb.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override mongodb.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `commonLabels` | Add labels to all the deployed resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `commonAnnotations` | Common annotations to add to all Mongo resources (sub-charts are not considered). 
Evaluated as a template | `{}` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### MongoDB(®) parameters + +| Name | Description | Value | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------- | +| `image.registry` | MongoDB(®) image registry | `docker.io` | +| `image.repository` | MongoDB(®) image registry | `drycc/mongodb` | +| `image.tag` | MongoDB(®) image tag (immutable tags are recommended) | `6.0.2-debian-11-r1` | +| `image.digest` | MongoDB(®) image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | MongoDB(®) image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | +| `schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` | +| `architecture` | MongoDB(®) architecture (`standalone` or `replicaset`) | `standalone` | +| `useStatefulSet` | Set to true to use a StatefulSet instead of a Deployment (only when `architecture=standalone`) | `false` | +| `auth.enabled` | Enable authentication | `true` | +| `auth.rootUser` | MongoDB(®) root user | `root` | +| `auth.rootPassword` | MongoDB(®) root password | `""` | +| `auth.usernames` | List of custom users to be created during the initialization | `[]` | +| `auth.passwords` | List of passwords for the custom users set at `auth.usernames` | `[]` | +| `auth.databases` | List of custom databases to be created during the 
initialization | `[]` | +| `auth.username` | DEPRECATED: use `auth.usernames` instead | `""` | +| `auth.password` | DEPRECATED: use `auth.passwords` instead | `""` | +| `auth.database` | DEPRECATED: use `auth.databases` instead | `""` | +| `auth.replicaSetKey` | Key used for authentication in the replicaset (only when `architecture=replicaset`) | `""` | +| `auth.existingSecret` | Existing secret with MongoDB(®) credentials (keys: `mongodb-passwords`, `mongodb-root-password`, `mongodb-metrics-password`, ` mongodb-replica-set-key`) | `""` | +| `tls.enabled` | Enable MongoDB(®) TLS support between nodes in the cluster as well as between mongo clients and nodes | `false` | +| `tls.autoGenerated` | Generate a custom CA and self-signed certificates | `true` | +| `tls.existingSecret` | Existing secret with TLS certificates (keys: `mongodb-ca-cert`, `mongodb-ca-key`, `client-pem`) | `""` | +| `tls.caCert` | Custom CA certificated (base64 encoded) | `""` | +| `tls.caKey` | CA certificate private key (base64 encoded) | `""` | +| `tls.image.registry` | Init container TLS certs setup image registry | `docker.io` | +| `tls.image.repository` | Init container TLS certs setup image repository | `drycc/nginx` | +| `tls.image.tag` | Init container TLS certs setup image tag (immutable tags are recommended) | `1.23.1-debian-11-r26` | +| `tls.image.digest` | Init container TLS certs setup image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `tls.image.pullPolicy` | Init container TLS certs setup image pull policy | `IfNotPresent` | +| `tls.image.pullSecrets` | Init container TLS certs specify docker-registry secret names as an array | `[]` | +| `tls.extraDnsNames` | Add extra dns names to the CA, can solve x509 auth issue for pod clients | `[]` | +| `tls.mode` | Allows to set the tls mode which should be used when tls is enabled (options: `allowTLS`, `preferTLS`, `requireTLS`) | `requireTLS` | +| `tls.resources.limits` | Init container generate-tls-certs resource limits | `{}` | +| `tls.resources.requests` | Init container generate-tls-certs resource requests | `{}` | +| `hostAliases` | Add deployment host aliases | `[]` | +| `replicaSetName` | Name of the replica set (only when `architecture=replicaset`) | `rs0` | +| `replicaSetHostnames` | Enable DNS hostnames in the replicaset config (only when `architecture=replicaset`) | `true` | +| `enableIPv6` | Switch to enable/disable IPv6 on MongoDB(®) | `false` | +| `directoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB(®) | `false` | +| `systemLogVerbosity` | MongoDB(®) system log verbosity level | `0` | +| `disableSystemLog` | Switch to enable/disable MongoDB(®) system log | `false` | +| `disableJavascript` | Switch to enable/disable MongoDB(®) server-side JavaScript execution | `false` | +| `enableJournal` | Switch to enable/disable MongoDB(®) Journaling | `true` | +| `configuration` | MongoDB(®) configuration file to be used for Primary and Secondary nodes | `""` | + + +### replicaSetConfigurationSettings settings applied during runtime (not via configuration file) + +| Name | Description | Value | +| ----------------------------------------------- | --------------------------------------------------------------------------------------------------- | ------- | +| `replicaSetConfigurationSettings.enabled` | Enable MongoDB(®) Switch to enable/disable configuring 
MongoDB(®) run time rs.conf settings | `false` | +| `replicaSetConfigurationSettings.configuration` | run-time rs.conf settings | `{}` | +| `existingConfigmap` | Name of existing ConfigMap with MongoDB(®) configuration for Primary and Secondary nodes | `""` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsConfigMap` | Existing ConfigMap with custom initdb scripts | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `extraFlags` | MongoDB(®) additional command line flags | `[]` | +| `extraEnvVars` | Extra environment variables to add to MongoDB(®) pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` | + + +### MongoDB(®) statefulset parameters + +| Name | Description | Value | +| --------------------------------------- | --------------------------------------------------------------------------------------------------------------- | --------------- | +| `annotations` | Additional labels to be added to the MongoDB(®) statefulset. Evaluated as a template | `{}` | +| `labels` | Annotations to be added to the MongoDB(®) statefulset. Evaluated as a template | `{}` | +| `replicaCount` | Number of MongoDB(®) nodes (only when `architecture=replicaset`) | `2` | +| `updateStrategy.type` | Strategy to use to replace existing MongoDB(®) pods. When architecture=standalone and useStatefulSet=false, | `RollingUpdate` | +| `podManagementPolicy` | Pod management policy for MongoDB(®) | `OrderedReady` | +| `podAffinityPreset` | MongoDB(®) Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | MongoDB(®) Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | MongoDB(®) Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | MongoDB(®) Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | MongoDB(®) Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | MongoDB(®) Affinity for pod assignment | `{}` | +| `nodeSelector` | MongoDB(®) Node labels for pod assignment | `{}` | +| `tolerations` | MongoDB(®) Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | MongoDB(®) Spread Constraints for Pods | `[]` | +| `lifecycleHooks` | LifecycleHook for the MongoDB(®) container(s) to automate configuration before or after startup | `{}` | +| `terminationGracePeriodSeconds` | MongoDB(®) Termination Grace Period | `""` | +| `podLabels` | MongoDB(®) pod labels | `{}` | +| `podAnnotations` | MongoDB(®) Pod annotations | `{}` | +| `priorityClassName` | Name of the existing priority class to be used by MongoDB(®) pod(s) | `""` | +| `runtimeClassName` | Name of the runtime class to be used by MongoDB(®) pod(s) | `""` | +| `podSecurityContext.enabled` | Enable MongoDB(®) pod(s)' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the volumes of the MongoDB(®) pod(s) | `1001` | +| `podSecurityContext.sysctls` | sysctl settings of the MongoDB(®) pod(s)' | `[]` | +| `containerSecurityContext.enabled` | Enable MongoDB(®) container(s)' Security Context | `true` | +| `containerSecurityContext.runAsUser` | User ID for the MongoDB(®) container | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set MongoDB(®) container's Security Context runAsNonRoot | `true` | +| `resources.limits` | The resources limits for MongoDB(®) containers | `{}` | +| `resources.requests` | The requested resources for MongoDB(®) containers | `{}` | +| `containerPorts.mongodb` | MongoDB(®) container port | `27017` | 
+| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `20` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `10` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe for MongoDB(®) containers | `{}` | +| `customReadinessProbe` | Override default readiness probe for MongoDB(®) containers | `{}` | +| `customStartupProbe` | Override default startup probe for MongoDB(®) containers | `{}` | +| `initContainers` | Add additional init containers for the hidden node pod(s) | `[]` | +| `sidecars` | Add additional sidecar containers for the MongoDB(®) pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MongoDB(®) container(s) | `[]` | +| 
`extraVolumes` | Optionally specify extra list of additional volumes to the MongoDB(®) statefulset | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation for MongoDB(®) pod(s) | `false` | +| `pdb.minAvailable` | Minimum number/percentage of MongoDB(®) pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of MongoDB(®) pods that may be made unavailable after the eviction | `""` | + + +### Traffic exposure parameters + +| Name | Description | Value | +| -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `service.nameOverride` | MongoDB(®) service name | `""` | +| `service.type` | Kubernetes Service type (only for standalone architecture) | `ClusterIP` | +| `service.portName` | MongoDB(®) service port name (only for standalone architecture) | `mongodb` | +| `service.ports.mongodb` | MongoDB(®) service port. 
| `27017` | +| `service.nodePorts.mongodb` | Port to bind to for NodePort and LoadBalancer service types (only for standalone architecture) | `""` | +| `service.clusterIP` | MongoDB(®) service cluster IP (only for standalone architecture) | `""` | +| `service.externalIPs` | Specify the externalIP value for the ClusterIP service type (only for standalone architecture) | `[]` | +| `service.loadBalancerIP` | loadBalancerIP for MongoDB(®) Service (only for standalone architecture) | `""` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer (only for standalone architecture) | `[]` | +| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `service.annotations` | Provide any additional annotations that may be required | `{}` | +| `service.externalTrafficPolicy` | service external traffic policy (only for standalone architecture) | `Local` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to MongoDB(®) nodes (only for replicaset architecture) | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `drycc/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.2-debian-11-r2` | +| `externalAccess.autoDiscovery.image.digest` | Init container auto-discovery image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. Allowed values: NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.service.portName` | MongoDB(®) port name used for external access when service type is LoadBalancer | `mongodb` | +| `externalAccess.service.ports.mongodb` | MongoDB(®) port used for external access when service type is LoadBalancer | `27017` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB(®) nodes | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.externalTrafficPolicy` | MongoDB(®) service external traffic policy | `Local` | +| `externalAccess.service.nodePorts` | Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort | `[]` | +| `externalAccess.service.domain` | Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort | `""` | +| `externalAccess.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `externalAccess.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| 
`externalAccess.hidden.enabled` | Enable Kubernetes external cluster access to MongoDB(®) hidden nodes | `false` | +| `externalAccess.hidden.service.type` | Kubernetes Service type for external access. Allowed values: NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.hidden.service.portName` | MongoDB(®) port name used for external access when service type is LoadBalancer | `mongodb` | +| `externalAccess.hidden.service.ports.mongodb` | MongoDB(®) port used for external access when service type is LoadBalancer | `27017` | +| `externalAccess.hidden.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB(®) nodes | `[]` | +| `externalAccess.hidden.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.hidden.service.externalTrafficPolicy` | MongoDB(®) service external traffic policy | `Local` | +| `externalAccess.hidden.service.nodePorts` | Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort. 
Length must be the same as replicaCount | `[]` | +| `externalAccess.hidden.service.domain` | Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort | `""` | +| `externalAccess.hidden.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `externalAccess.hidden.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.hidden.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `externalAccess.hidden.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `persistence.enabled` | Enable MongoDB(®) data persistence using PVC | `true` | +| `persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` | +| `persistence.resourcePolicy` | Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted | `""` | +| `persistence.storageClass` | PVC Storage Class for MongoDB(®) data volume | `""` | +| `persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for MongoDB(®) data volume | `8Gi` | +| `persistence.annotations` | PVC annotations | `{}` | +| `persistence.mountPath` | Path to mount the volume at | `/drycc/mongodb` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. 
when using local volumes) | `{}` | +| `persistence.volumeClaimTemplates.requests` | Custom PVC requests attributes | `{}` | +| `persistence.volumeClaimTemplates.dataSource` | Add dataSource to the VolumeClaimTemplate | `{}` | + + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for MongoDB(®) pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `serviceAccount.annotations` | Additional Service Account annotations | `{}` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | +| `rbac.rules` | Custom rules to create following the role specification | `[]` | +| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `podSecurityPolicy.allowPrivilegeEscalation` | Enable privilege escalation | `false` | +| `podSecurityPolicy.privileged` | Allow privileged | `false` | +| `podSecurityPolicy.spec` | Specify the full spec to use for Pod Security Policy | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `drycc/drycc-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r37` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.securityContext.runAsUser` | User ID for the volumePermissions container | `0` | + + +### Arbiter parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------- | --------------- | +| `arbiter.enabled` | Enable deploying the arbiter | `true` | +| `arbiter.hostAliases` | Add deployment host aliases | `[]` | +| `arbiter.configuration` | Arbiter configuration file to be used | `""` | +| `arbiter.existingConfigmap` | Name of existing ConfigMap with Arbiter configuration | `""` | +| `arbiter.command` | Override default container command (useful when using custom images) | `[]` | +| `arbiter.args` | Override default container args (useful when using custom images) | `[]` | +| `arbiter.extraFlags` | Arbiter additional command line flags | `[]` | +| `arbiter.extraEnvVars` | Extra environment variables to add to Arbiter pods | `[]` | +| `arbiter.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `arbiter.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` | +| `arbiter.annotations` | Additional annotations to be added to the Arbiter statefulset | `{}` | +| `arbiter.labels` | Additional labels to be added to the Arbiter statefulset | `{}` | +| `arbiter.topologySpreadConstraints` | MongoDB(®) Spread Constraints for arbiter Pods | `[]` | +| `arbiter.lifecycleHooks` |
LifecycleHook for the Arbiter container to automate configuration before or after startup | `{}` | +| `arbiter.terminationGracePeriodSeconds` | Arbiter Termination Grace Period | `""` | +| `arbiter.updateStrategy.type` | Strategy that will be employed to update Pods in the StatefulSet | `RollingUpdate` | +| `arbiter.podManagementPolicy` | Pod management policy for MongoDB(®) | `OrderedReady` | +| `arbiter.schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` | +| `arbiter.podAffinityPreset` | Arbiter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `arbiter.podAntiAffinityPreset` | Arbiter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `arbiter.nodeAffinityPreset.type` | Arbiter Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `arbiter.nodeAffinityPreset.key` | Arbiter Node label key to match Ignored if `affinity` is set. | `""` | +| `arbiter.nodeAffinityPreset.values` | Arbiter Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `arbiter.affinity` | Arbiter Affinity for pod assignment | `{}` | +| `arbiter.nodeSelector` | Arbiter Node labels for pod assignment | `{}` | +| `arbiter.tolerations` | Arbiter Tolerations for pod assignment | `[]` | +| `arbiter.podLabels` | Arbiter pod labels | `{}` | +| `arbiter.podAnnotations` | Arbiter Pod annotations | `{}` | +| `arbiter.priorityClassName` | Name of the existing priority class to be used by Arbiter pod(s) | `""` | +| `arbiter.runtimeClassName` | Name of the runtime class to be used by Arbiter pod(s) | `""` | +| `arbiter.podSecurityContext.enabled` | Enable Arbiter pod(s)' Security Context | `true` | +| `arbiter.podSecurityContext.fsGroup` | Group ID for the volumes of the Arbiter pod(s) | `1001` | +| `arbiter.podSecurityContext.sysctls` | sysctl settings of the Arbiter pod(s)' | `[]` | +| `arbiter.containerSecurityContext.enabled` | Enable Arbiter container(s)' Security Context | `true` | +| `arbiter.containerSecurityContext.runAsUser` | User ID for the Arbiter container | `1001` | +| `arbiter.containerSecurityContext.runAsNonRoot` | Set Arbiter containers' Security Context runAsNonRoot | `true` | +| `arbiter.resources.limits` | The resources limits for Arbiter containers | `{}` | +| `arbiter.resources.requests` | The requested resources for Arbiter containers | `{}` | +| `arbiter.containerPorts.mongodb` | MongoDB(®) arbiter container port | `27017` | +| `arbiter.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `arbiter.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `arbiter.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `arbiter.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `arbiter.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `arbiter.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `arbiter.readinessProbe.enabled` | Enable readinessProbe | `true` | 
+| `arbiter.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `arbiter.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `20` | +| `arbiter.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` | +| `arbiter.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `arbiter.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `arbiter.startupProbe.enabled` | Enable startupProbe | `false` | +| `arbiter.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `arbiter.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `arbiter.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `arbiter.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `arbiter.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `arbiter.customLivenessProbe` | Override default liveness probe for Arbiter containers | `{}` | +| `arbiter.customReadinessProbe` | Override default readiness probe for Arbiter containers | `{}` | +| `arbiter.customStartupProbe` | Override default startup probe for Arbiter containers | `{}` | +| `arbiter.initContainers` | Add additional init containers for the Arbiter pod(s) | `[]` | +| `arbiter.sidecars` | Add additional sidecar containers for the Arbiter pod(s) | `[]` | +| `arbiter.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Arbiter container(s) | `[]` | +| `arbiter.extraVolumes` | Optionally specify extra list of additional volumes to the Arbiter statefulset | `[]` | +| `arbiter.pdb.create` | Enable/disable a Pod Disruption Budget creation for Arbiter pod(s) | `false` | +| `arbiter.pdb.minAvailable` | Minimum number/percentage of Arbiter pods that should remain scheduled | `1` | +| `arbiter.pdb.maxUnavailable` | Maximum number/percentage of Arbiter pods that may be made 
unavailable | `""` | +| `arbiter.service.nameOverride` | The arbiter service name | `""` | +| `arbiter.service.ports.mongodb` | MongoDB(®) service port | `27017` | +| `arbiter.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `arbiter.service.annotations` | Provide any additional annotations that may be required | `{}` | + + +### Hidden Node parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------- | +| `hidden.enabled` | Enable deploying the hidden nodes | `false` | +| `hidden.hostAliases` | Add deployment host aliases | `[]` | +| `hidden.configuration` | Hidden node configuration file to be used | `""` | +| `hidden.existingConfigmap` | Name of existing ConfigMap with Hidden node configuration | `""` | +| `hidden.command` | Override default container command (useful when using custom images) | `[]` | +| `hidden.args` | Override default container args (useful when using custom images) | `[]` | +| `hidden.extraFlags` | Hidden node additional command line flags | `[]` | +| `hidden.extraEnvVars` | Extra environment variables to add to Hidden node pods | `[]` | +| `hidden.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `hidden.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` | +| `hidden.annotations` | Additional annotations to be added to the hidden node statefulset | `{}` | +| `hidden.labels` | Additional labels to be added to the hidden node statefulset | `{}` | +| `hidden.topologySpreadConstraints` | MongoDB(®) Spread Constraints for hidden Pods | `[]` | +| `hidden.lifecycleHooks` | LifecycleHook for the Hidden container to automate configuration before or after startup | `{}` | +| `hidden.replicaCount` | Number of hidden nodes (only when `architecture=replicaset`) | `1` |
+| `hidden.terminationGracePeriodSeconds` | Hidden Termination Grace Period | `""` | +| `hidden.updateStrategy.type` | Strategy that will be employed to update Pods in the StatefulSet | `RollingUpdate` | +| `hidden.podManagementPolicy` | Pod management policy for hidden node | `OrderedReady` | +| `hidden.schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` | +| `hidden.podAffinityPreset` | Hidden node Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `hidden.podAntiAffinityPreset` | Hidden node Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `hidden.nodeAffinityPreset.type` | Hidden Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `hidden.nodeAffinityPreset.key` | Hidden Node label key to match Ignored if `affinity` is set. | `""` | +| `hidden.nodeAffinityPreset.values` | Hidden Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `hidden.affinity` | Hidden node Affinity for pod assignment | `{}` | +| `hidden.nodeSelector` | Hidden node Node labels for pod assignment | `{}` | +| `hidden.tolerations` | Hidden node Tolerations for pod assignment | `[]` | +| `hidden.podLabels` | Hidden node pod labels | `{}` | +| `hidden.podAnnotations` | Hidden node Pod annotations | `{}` | +| `hidden.priorityClassName` | Name of the existing priority class to be used by hidden node pod(s) | `""` | +| `hidden.runtimeClassName` | Name of the runtime class to be used by hidden node pod(s) | `""` | +| `hidden.podSecurityContext.enabled` | Enable Hidden pod(s)' Security Context | `true` | +| `hidden.podSecurityContext.fsGroup` | Group ID for the volumes of the Hidden pod(s) | `1001` | +| `hidden.podSecurityContext.sysctls` | sysctl settings of the Hidden pod(s)' | `[]` | +| `hidden.containerSecurityContext.enabled` | Enable Hidden container(s)' Security Context | `true` | +| `hidden.containerSecurityContext.runAsUser` | User ID for the Hidden container | `1001` | +| `hidden.containerSecurityContext.runAsNonRoot` | Set Hidden containers' Security Context runAsNonRoot | `true` | +| `hidden.resources.limits` | The resources limits for hidden node containers | `{}` | +| `hidden.resources.requests` | The requested resources for hidden node containers | `{}` | +| `hidden.containerPorts.mongodb` | MongoDB(®) hidden container port | `27017` | +| `hidden.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `hidden.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `hidden.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `hidden.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `hidden.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `hidden.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `hidden.readinessProbe.enabled` | Enable readinessProbe | 
`true` | +| `hidden.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `hidden.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `20` | +| `hidden.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` | +| `hidden.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `hidden.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `hidden.startupProbe.enabled` | Enable startupProbe | `false` | +| `hidden.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `hidden.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `hidden.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `hidden.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `hidden.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `hidden.customLivenessProbe` | Override default liveness probe for hidden node containers | `{}` | +| `hidden.customReadinessProbe` | Override default readiness probe for hidden node containers | `{}` | +| `hidden.customStartupProbe` | Override default startup probe for MongoDB(®) containers | `{}` | +| `hidden.initContainers` | Add init containers to the MongoDB(®) Hidden pods. 
| `[]` | +| `hidden.sidecars` | Add additional sidecar containers for the hidden node pod(s) | `[]` | +| `hidden.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the hidden node container(s) | `[]` | +| `hidden.extraVolumes` | Optionally specify extra list of additional volumes to the hidden node statefulset | `[]` | +| `hidden.pdb.create` | Enable/disable a Pod Disruption Budget creation for hidden node pod(s) | `false` | +| `hidden.pdb.minAvailable` | Minimum number/percentage of hidden node pods that should remain scheduled | `1` | +| `hidden.pdb.maxUnavailable` | Maximum number/percentage of hidden node pods that may be made unavailable | `""` | +| `hidden.persistence.enabled` | Enable hidden node data persistence using PVC | `true` | +| `hidden.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `hidden.persistence.storageClass` | PVC Storage Class for hidden node data volume | `""` | +| `hidden.persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | +| `hidden.persistence.size` | PVC Storage Request for hidden node data volume | `8Gi` | +| `hidden.persistence.annotations` | PVC annotations | `{}` | +| `hidden.persistence.mountPath` | The path the volume will be mounted at, useful when using different MongoDB(®) images. | `/drycc/mongodb` | +| `hidden.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments | `""` | +| `hidden.persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. 
when using local volumes) | `{}` | +| `hidden.persistence.volumeClaimTemplates.requests` | Custom PVC requests attributes | `{}` | +| `hidden.persistence.volumeClaimTemplates.dataSource` | Set volumeClaimTemplate dataSource | `{}` | +| `hidden.service.portName` | MongoDB(®) service port name | `mongodb` | +| `hidden.service.ports.mongodb` | MongoDB(®) service port | `27017` | +| `hidden.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `hidden.service.annotations` | Provide any additional annotations that may be required | `{}` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `metrics.enabled` | Enable using a sidecar Prometheus exporter | `false` | +| `metrics.image.registry` | MongoDB(®) Prometheus exporter image registry | `docker.io` | +| `metrics.image.repository` | MongoDB(®) Prometheus exporter image repository | `drycc/mongodb-exporter` | +| `metrics.image.tag` | MongoDB(®) Prometheus exporter image tag (immutable tags are recommended) | `0.34.0-debian-11-r19` | +| `metrics.image.digest` | MongoDB(®) image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | MongoDB(®) Prometheus exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.username` | String with username for the metrics exporter | `""` | +| `metrics.password` | String with password for the metrics exporter | `""` | +| `metrics.extraFlags` | String with extra flags to the metrics exporter | `""` | +| `metrics.command` | Override default container command (useful when using custom images) | `[]` | +| `metrics.args` | Override default container args (useful when using custom images) | `[]` | +| `metrics.resources.limits` | The resources limits for Prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for Prometheus exporter containers | `{}` | +| `metrics.containerPort` | Port of the Prometheus metrics container | `9216` | +| `metrics.service.annotations` | Annotations for Prometheus Exporter pods. Evaluated as a template. 
| `{}` | +| `metrics.service.type` | Type of the Prometheus metrics service | `ClusterIP` | +| `metrics.service.ports.metrics` | Port of the Prometheus metrics service | `9216` | +| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Override default liveness probe for MongoDB(®) containers | `{}` | +| `metrics.customReadinessProbe` | Override default 
readiness probe for MongoDB(®) containers | `{}` | +| `metrics.customStartupProbe` | Override default startup probe for MongoDB(®) containers | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` | +| `metrics.serviceMonitor.labels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | Namespace where prometheusRules resource should be created | `""` | +| `metrics.prometheusRule.rules` | Rules to be created, check values for an example | `[]` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + +```bash +$ helm install my-release \ + --set auth.rootPassword=secretpassword,auth.username=my-user,auth.password=my-password,auth.database=my-database \ + my-repo/mongodb +``` + +The above command sets the MongoDB(®) `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml my-repo/mongodb +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.drycc.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Drycc will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Customize a new MongoDB instance + +The [Drycc MongoDB(®) image](https://github.com/drycc/containers/tree/main/drycc/mongodb) supports the use of custom scripts to initialize a fresh instance. In order to execute the scripts, two options are available: + +* Specify them using the `initdbScripts` parameter as dict. 
+* Define an external Kubernetes ConfigMap with all the initialization scripts by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option. + +The allowed script extensions are `.sh` and `.js`. + +### Replicaset: Access MongoDB(®) nodes from outside the cluster + +In order to access MongoDB(®) nodes from outside the cluster when using a replicaset architecture, a specific service per MongoDB(®) pod will be created. There are two ways of configuring external access: + +- Using LoadBalancer services +- Using NodePort services. + +Refer to the [chart documentation for more details and configuration examples](https://docs.drycc.com/kubernetes/infrastructure/mongodb/configuration/configure-external-access-replicaset/). + +### Add extra environment variables + +To add extra environment variables (useful for advanced operations like custom init scripts), use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use Sidecars and Init Containers + +If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter. Similarly, extra init containers can be added using the `initContainers` parameter. + +Refer to the chart documentation for more information on, and examples of, configuring and using [sidecars and init containers](https://docs.drycc.com/kubernetes/infrastructure/mongodb/configuration/configure-sidecar-init-containers/). + +## Persistence + +The [Drycc MongoDB(®)](https://github.com/drycc/containers/tree/main/drycc/mongodb) image stores the MongoDB(®) data and configurations at the `/drycc/mongodb` path of the container. 
+ +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.drycc.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). + +## Use custom Prometheus rules + +Custom Prometheus rules can be defined for the Prometheus Operator by using the `prometheusRule` parameter. + +Refer to the [chart documentation for an example of a custom rule](https://docs.drycc.com/kubernetes/infrastructure/mongodb/administration/use-prometheus-rules/). + +## Enable SSL/TLS + +This chart supports enabling SSL/TLS between nodes in the cluster, as well as between MongoDB(®) clients and nodes, by setting the `MONGODB_EXTRA_FLAGS` and `MONGODB_CLIENT_EXTRA_FLAGS` container environment variables, together with the correct `MONGODB_ADVERTISED_HOSTNAME`. To enable full TLS encryption, set the `tls.enabled` parameter to `true`. + +Refer to the [chart documentation for more information on enabling TLS](https://docs.drycc.com/kubernetes/infrastructure/mongodb/administration/enable-tls/). + +### Set Pod affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [drycc/common](https://github.com/drycc/charts/tree/master/drycc/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. 
+ +## Troubleshooting + +Find more information about how to deal with common errors related to Drycc's Helm charts in [this troubleshooting guide](https://docs.drycc.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +If authentication is enabled, it's necessary to set the `auth.rootPassword` (also `auth.replicaSetKey` when using a replicaset architecture) when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release my-repo/mongodb --set auth.rootPassword=[PASSWORD] (--set auth.replicaSetKey=[REPLICASETKEY]) +``` + +> Note: you need to substitute the placeholders [PASSWORD] and [REPLICASETKEY] with the values obtained in the installation notes. + +### To 12.0.0 + +This major release renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Drycc charts repository. 
+ +Affected values: + +- `strategyType` is replaced by `updateStrategy` +- `service.port` is renamed to `service.ports.mongodb` +- `service.nodePort` is renamed to `service.nodePorts.mongodb` +- `externalAccess.service.port` is renamed to `externalAccess.service.ports.mongodb` +- `rbac.role.rules` is renamed to `rbac.rules` +- `externalAccess.hidden.service.port` is renamed to `externalAccess.hidden.service.ports.mongodb` +- `hidden.strategyType` is replaced by `hidden.updateStrategy` +- `metrics.serviceMonitor.relabellings` is renamed to `metrics.serviceMonitor.relabelings` (typo fixed) +- `metrics.serviceMonitor.additionalLabels` is renamed to `metrics.serviceMonitor.labels` + +Additionally, it also updates the MongoDB image dependency to its newest major, 5.0 + +### To 11.0.0 + +In this version, the mongodb-exporter bundled as part of this Helm chart was updated to a new version which, even though it is not a major change, can contain breaking changes (from `0.11.X` to `0.30.X`). +Please visit the release notes from the upstream project at https://github.com/percona/mongodb_exporter/releases + +### To 10.0.0 + +[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.drycc.com/kubernetes/infrastructure/mongodb/administration/upgrade-helm3/). + +### To 9.0.0 + +MongoDB(®) container images were updated to `4.4.x` and it can affect compatibility with older versions of MongoDB(®). 
Refer to the following guides to upgrade your applications: + +- [Standalone](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-standalone/) +- [Replica Set](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-replica-set/) + +### To 8.0.0 + +- Architecture used to configure MongoDB(®) as a replicaset was completely refactored. Now, both primary and secondary nodes are part of the same statefulset. +- Chart labels were adapted to follow the Helm charts best practices. +- This version introduces `drycc/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/drycc/charts/tree/master/drycc/common#drycc-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. +- Several parameters were renamed or disappeared in favor of new ones on this major version. These are the most important ones: + - `replicas` is renamed to `replicaCount`. + - Authentication parameters are reorganized under the `auth.*` parameter: + - `usePassword` is renamed to `auth.enabled`. + - `mongodbRootPassword`, `mongodbUsername`, `mongodbPassword`, `mongodbDatabase`, and `replicaSet.key` are now `auth.rootPassword`, `auth.username`, `auth.password`, `auth.database`, and `auth.replicaSetKey` respectively. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Parameters prefixed with `mongodb` are renamed removing the prefix. E.g. `mongodbEnableIPv6` is renamed to `enableIPv6`. + - Parameters affecting Arbiter nodes are reorganized under the `arbiter.*` parameter. + +Consequences: + +- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MongoDB(®) chart, and migrate your data by creating a backup of the database, and restoring it on the new release. 
+ +### To 7.0.0 + +From this version, the way of setting the ingress rules has changed. Instead of using `ingress.paths` and `ingress.hosts` as separate objects, you should now define the rules as objects inside the `ingress.hosts` value, for example: + +```yaml +ingress: + hosts: + - name: mongodb.local + path: / +``` + +### To 6.0.0 + +From this version, `mongodbEnableIPv6` is set to `false` by default in order to work properly in most k8s clusters, if you want to use IPv6 support, you need to set this variable to `true` by adding `--set mongodbEnableIPv6=true` to your `helm` command. +You can find more information in the [`drycc/mongodb` image README](https://github.com/drycc/containers/tree/main/drycc/mongodb#readme). + +### To 5.0.0 + +When enabling replicaset configuration, backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets. +Use the workaround below to upgrade from versions previous to 5.0.0. The following example assumes that the release name is `my-release`: + +```console +$ kubectl delete statefulset my-release-mongodb-arbiter my-release-mongodb-primary my-release-mongodb-secondary --cascade=false +``` + +### Add extra deployment options + +To add extra deployments (useful for advanced features like sidecars), use the `extraDeploy` property. 
+ +In the example below, you can find how to use a example here for a [MongoDB replica set pod labeler sidecar](https://github.com/combor/k8s-mongo-labeler-sidecar) to identify the primary pod and dynamically label it as the primary node: + +```yaml +extraDeploy: + - apiVersion: v1 + kind: Service + metadata: + name: mongodb-primary + namespace: default + labels: + app.kubernetes.io/component: mongodb + app.kubernetes.io/instance: mongodb + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: mongodb + spec: + type: NodePort + externalTrafficPolicy: Cluster + ports: + - name: mongodb-primary + port: 30001 + nodePort: 30001 + protocol: TCP + targetPort: mongodb + selector: + app.kubernetes.io/component: mongodb + app.kubernetes.io/instance: mongodb + app.kubernetes.io/name: mongodb + primary: "true" +``` + +## License + +Copyright © 2022 Drycc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/NOTES.txt b/addons/mongodb/8.0/chart/mongodb-8.0/templates/NOTES.txt new file mode 100644 index 00000000..2bb79222 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/NOTES.txt @@ -0,0 +1,202 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/drycc/scripts/mongodb/entrypoint.sh /opt/drycc/scripts/mongodb/run.sh + +{{- else }} + +{{- $replicaCount := int .Values.replicaCount }} +{{- $portNumber := int .Values.service.ports.mongodb }} +{{- $fullname := include "mongodb.fullname" . }} +{{- $releaseNamespace := include "mongodb.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- $mongoList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $mongoList = append $mongoList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $portNumber) }} +{{- end }} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }} + +#################################################################################### +### ERROR: You enabled external access to MongoDB® nodes without specifying ### +### the array of load balancer IPs for MongoDB® nodes. 
### +#################################################################################### + +This deployment will be incomplete until you configure the array of load balancer +IPs for MongoDB® nodes. To complete your deployment follow the steps below: + +1. Wait for the load balancer IPs (it may take a few minutes for them to be available): + + kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb" -w + +2. Obtain the load balancer IPs and upgrade your chart: + + {{- range $e, $i := until $replicaCount }} + LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" + {{- end }} + +3. Upgrade your chart: + + helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} my-repo/{{ .Chart.Name }} \ + --set mongodb.replicaCount={{ $replicaCount }} \ + --set mongodb.externalAccess.enabled=true \ + {{- range $i, $e := until $replicaCount }} + --set mongodb.externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \ + {{- end }} + --set mongodb.externalAccess.service.type=LoadBalancer + +{{- else }} + +{{- if and (or (and (eq .Values.architecture "standalone") (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort"))) (and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled)) (not .Values.auth.enabled) }} +------------------------------------------------------------------------------- + WARNING + + By not enabling "mongodb.auth.enabled" you have most likely exposed the + MongoDB® service externally without any authentication mechanism. + + For security reasons, we strongly suggest that you enable authentication + setting the "mongodb.auth.enabled" parameter to "true". 
+ +------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB® can be accessed on the following DNS name(s) and ports from within your cluster: + +{{- if eq .Values.architecture "replicaset" }} +{{ join "\n" $mongoList | nindent 4 }} +{{- else }} + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{- if .Values.auth.enabled }} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.secretName" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 -d) + +{{- end }} +{{- $customUsers := include "mongodb.customUsers" . -}} +{{- $customDatabases := include "mongodb.customDatabases" . -}} +{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} +{{- $customUsersList := splitList "," $customUsers }} +{{- range $index, $user := $customUsersList }} + +To get the password for "{{ $user }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ include "mongodb.namespace" $ }} {{ include "mongodb.secretName" $ }} -o jsonpath="{.data.mongodb-passwords}" | base64 -d | awk -F',' '{print ${{ add 1 $index }}}') + +{{- end }} +{{- end }} + +To connect to your database, create a MongoDB® client container: + + kubectl run --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --restart='Never' --env="MONGODB_ROOT_PASSWORD=$MONGODB_ROOT_PASSWORD" --image {{ template "mongodb.image" . }} --command -- bash + +Then, run the following command: + + {{- if eq .Values.architecture "replicaset" }} + mongosh admin --host "{{ join "," $mongoList }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + {{- else }} + mongosh admin --host "{{ template "mongodb.fullname" . 
}}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }} + {{- end }} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled }} + +To connect to your database nodes from outside, you need to add both primary and secondary nodes hostnames/IPs to your Mongo client. To obtain them, follow the instructions below: + +{{- if eq "NodePort" .Values.externalAccess.service.type }} +{{- if .Values.externalAccess.service.domain }} + + MongoDB® nodes domain: Use your provided hostname to reach MongoDB® nodes, {{ .Values.externalAccess.service.domain }} + +{{- else }} + + MongoDB® nodes domain: you can reach MongoDB® nodes on any of the K8s nodes external IPs. + + kubectl get nodes -o wide + +{{- end }} + + MongoDB® nodes port: You will have a different node port for each MongoDB® node. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -w' + + MongoDB® nodes domain: You will have a different external IP for each MongoDB® node. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + MongoDB® nodes port: {{ .Values.externalAccess.service.ports.mongodb }} + +{{- end }} + +{{- else if eq .Values.architecture "standalone" }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ template "mongodb.namespace" . }} -w {{ template "mongodb.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + mongosh --host $SERVICE_IP --port {{ $portNumber }} {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ template "mongodb.namespace" . }} svc/{{ template "mongodb.fullname" . 
}} {{ $portNumber }}:{{ $portNumber }} & + mongosh --host 127.0.0.1 {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the MongoDB® Prometheus metrics, get the MongoDB® Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "mongodb.fullname" . }}-metrics {{ .Values.metrics.service.ports.metrics }}:{{ .Values.metrics.service.ports.metrics }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.service.ports.metrics }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} +{{- end }} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.tls.image }} + diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/_helpers.tpl b/addons/mongodb/8.0/chart/mongodb-8.0/templates/_helpers.tpl new file mode 100644 index 00000000..81f6889c --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/_helpers.tpl @@ -0,0 +1,432 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- include "common.names.fullname" . -}} +{{- end -}} + +{{/* +Create a default mongo service name which can be overridden. 
+*/}} +{{- define "mongodb.service.nameOverride" -}} + {{- if and .Values.service .Values.service.nameOverride -}} + {{- print .Values.service.nameOverride -}} + {{- else -}} + {{- printf "%s-headless" (include "mongodb.fullname" .) -}} + {{- end }} +{{- end }} + +{{/* +Create a default mongo arbiter service name which can be overridden. +*/}} +{{- define "mongodb.arbiter.service.nameOverride" -}} + {{- if and .Values.arbiter.service .Values.arbiter.service.nameOverride -}} + {{- print .Values.arbiter.service.nameOverride -}} + {{- else -}} + {{- printf "%s-arbiter-headless" (include "mongodb.fullname" .) -}} + {{- end }} +{{- end }} + +{{/* +Return the proper MongoDB® image name +*/}} +{{- define "mongodb.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "mongodb.metrics.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mongodb.volumePermissions.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container auto-discovery image) +*/}} +{{- define "mongodb.externalAccess.autoDiscovery.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the TLS Certs image) +*/}} +{{- define "mongodb.tls.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.tls.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mongodb.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict 
"images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image .Values.tls.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "mongodb.namespace" -}} + {{- if and .Values.global .Values.global.namespaceOverride -}} + {{- print .Values.global.namespaceOverride -}} + {{- else -}} + {{- print .Release.Namespace -}} + {{- end }} +{{- end -}} +{{- define "mongodb.serviceMonitor.namespace" -}} + {{- if .Values.metrics.serviceMonitor.namespace -}} + {{- print .Values.metrics.serviceMonitor.namespace -}} + {{- else -}} + {{- include "mongodb.namespace" . -}} + {{- end }} +{{- end -}} +{{- define "mongodb.prometheusRule.namespace" -}} + {{- if .Values.metrics.prometheusRule.namespace -}} + {{- print .Values.metrics.prometheusRule.namespace -}} + {{- else -}} + {{- include "mongodb.namespace" . -}} + {{- end }} +{{- end -}} + +{{/* +Returns the proper service account name depending if an explicit service account name is set +in the values file. If the name is not set it will default to either mongodb.fullname if serviceAccount.create +is true or default otherwise. +*/}} +{{- define "mongodb.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{- default (include "mongodb.fullname" .) (print .Values.serviceAccount.name) -}} + {{- else -}} + {{- default "default" (print .Values.serviceAccount.name) -}} + {{- end -}} +{{- end -}} + +{{/* +Return the list of custom users to create during the initialization (string format) +*/}} +{{- define "mongodb.customUsers" -}} + {{- $customUsers := list -}} + {{- if .Values.auth.username -}} + {{- $customUsers = append $customUsers .Values.auth.username }} + {{- end }} + {{- range .Values.auth.usernames }} + {{- $customUsers = append $customUsers . 
}} + {{- end }} + {{- printf "%s" (default "" (join "," $customUsers)) -}} +{{- end -}} + +{{/* +Return the list of passwords for the custom users (string format) +*/}} +{{- define "mongodb.customPasswords" -}} + {{- $customPasswords := list -}} + {{- if .Values.auth.password -}} + {{- $customPasswords = append $customPasswords .Values.auth.password }} + {{- end }} + {{- range .Values.auth.passwords }} + {{- $customPasswords = append $customPasswords . }} + {{- end }} + {{- printf "%s" (default "" (join "," $customPasswords)) -}} +{{- end -}} + +{{/* +Return the list of custom databases to create during the initialization (string format) +*/}} +{{- define "mongodb.customDatabases" -}} + {{- $customDatabases := list -}} + {{- if .Values.auth.database -}} + {{- $customDatabases = append $customDatabases .Values.auth.database }} + {{- end }} + {{- range .Values.auth.databases }} + {{- $customDatabases = append $customDatabases . }} + {{- end }} + {{- printf "%s" (default "" (join "," $customDatabases)) -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB® configuration +*/}} +{{- define "mongodb.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB® +*/}} +{{- define "mongodb.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with MongoDB® credentials +*/}} +{{- define "mongodb.secretName" -}} + {{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} + {{- else -}} + {{- printf "%s" (include "mongodb.fullname" .) 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for MongoDB® +*/}} +{{- define "mongodb.createSecret" -}} +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "mongodb.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" .Values.initdbScriptsConfigMap -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if the Arbiter should be deployed +*/}} +{{- define "mongodb.arbiter.enabled" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB® configuration for the Arbiter +*/}} +{{- define "mongodb.arbiter.configmapName" -}} +{{- if .Values.arbiter.existingConfigmap -}} + {{- printf "%s" (tpl .Values.arbiter.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-arbiter" (include "mongodb.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB® Arbiter +*/}} +{{- define "mongodb.arbiter.createConfigmap" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled .Values.arbiter.configuration (not .Values.arbiter.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if the Hidden should be deployed +*/}} +{{- define "mongodb.hidden.enabled" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.hidden.enabled }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB® configuration for the Hidden +*/}} +{{- define "mongodb.hidden.configmapName" -}} +{{- if .Values.hidden.existingConfigmap -}} + {{- printf "%s" (tpl .Values.hidden.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-hidden" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB® Hidden +*/}} +{{- define "mongodb.hidden.createConfigmap" -}} +{{- if and (include "mongodb.hidden.enabled" .) .Values.hidden.enabled .Values.hidden.configuration (not .Values.hidden.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mongodb.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "mongodb.validateValues.pspAndRBAC" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.architecture" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.customUsersDBs" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.customUsersDBsLength" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.loadBalancerIPsListLength" .) 
-}} +{{- $messages := append $messages (include "mongodb.validateValues.nodePortListLength" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate RBAC is created when using PSP */}} +{{- define "mongodb.validateValues.pspAndRBAC" -}} +{{- if and (.Values.podSecurityPolicy.create) (not .Values.rbac.create) -}} +mongodb: podSecurityPolicy.create, rbac.create + Both podSecurityPolicy.create and rbac.create must be true, if you want + to create podSecurityPolicy +{{- end -}} +{{- end -}} + +{{/* Validate values of MongoDB® - must provide a valid architecture */}} +{{- define "mongodb.validateValues.architecture" -}} +{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replicaset") -}} +mongodb: architecture + Invalid architecture selected. Valid values are "standalone" and + "replicaset". Please set a valid architecture (--set mongodb.architecture="xxxx") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - both auth.usernames and auth.databases are necessary +to create a custom user and database during 1st initialization +*/}} +{{- define "mongodb.validateValues.customUsersDBs" -}} +{{- $customUsers := include "mongodb.customUsers" . -}} +{{- $customDatabases := include "mongodb.customDatabases" . -}} +{{- if or (and (empty $customUsers) (not (empty $customDatabases))) (and (not (empty $customUsers)) (empty $customDatabases)) }} +mongodb: auth.usernames, auth.databases + Both auth.usernames and auth.databases must be provided to create + custom users and databases during 1st initialization. 
+ Please set both of them (--set auth.usernames[0]="xxxx",auth.databases[0]="yyyy") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - both auth.usernames and auth.databases arrays should have the same length +to create a custom user and database during 1st initialization +*/}} +{{- define "mongodb.validateValues.customUsersDBsLength" -}} +{{- if ne (len .Values.auth.usernames) (len .Values.auth.databases) }} +mongodb: auth.usernames, auth.databases + Both auth.usernames and auth.databases arrays should have the same length +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - service type for external access +*/}} +{{- define "mongodb.validateValues.externalAccessServiceType" -}} +{{- if and (eq .Values.architecture "replicaset") (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) (not (eq .Values.externalAccess.service.type "ClusterIP")) -}} +mongodb: externalAccess.service.type + Available service type for external access are NodePort, LoadBalancer or ClusterIP. +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - number of replicas must be the same than LoadBalancer IPs list +*/}} +{{- define "mongodb.validateValues.loadBalancerIPsListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $loadBalancerListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled ) (eq .Values.externalAccess.service.type "LoadBalancer") (not (eq $replicaCount $loadBalancerListLength )) -}} +mongodb: .Values.externalAccess.service.loadBalancerIPs + Number of replicas and loadBalancerIPs array length must be the same. 
+{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - number of replicas must be the same than NodePort list +*/}} +{{- define "mongodb.validateValues.nodePortListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "NodePort") (not (eq $replicaCount $nodePortListLength )) -}} +mongodb: .Values.externalAccess.service.nodePorts + Number of replicas and nodePorts array length must be the same. +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - RBAC should be enabled when autoDiscovery is enabled +*/}} +{{- define "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create ) }} +mongodb: rbac.create + By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true" + an initContainer will be used to autodetect the external IPs/ports by querying the + K8s API. Please note this initContainer requires specific RBAC resources. You can create them + by specifying "--set rbac.create=true". 
+{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® exporter URI string - auth.enabled and/or tls.enabled must be enabled or it defaults +*/}} +{{- define "mongodb.mongodb_exporter.uri" -}} + {{- $uriTlsArgs := ternary "tls=true&tlsCertificateKeyFile=/certs/mongodb.pem&tlsCAFile=/certs/mongodb-ca-cert" "" .Values.tls.enabled -}} + {{- if .Values.metrics.username }} + {{- $uriAuth := ternary "$(echo $MONGODB_METRICS_USERNAME | sed -r \"s/@/%40/g;s/:/%3A/g\"):$(echo $MONGODB_METRICS_PASSWORD | sed -r \"s/@/%40/g;s/:/%3A/g\")@" "" .Values.auth.enabled -}} + {{- printf "mongodb://%slocalhost:27017/admin?%s" $uriAuth $uriTlsArgs -}} + {{- else -}} + {{- $uriAuth := ternary "$MONGODB_ROOT_USER:$(echo $MONGODB_ROOT_PASSWORD | sed -r \"s/@/%40/g;s/:/%3A/g\")@" "" .Values.auth.enabled -}} + {{- printf "mongodb://%slocalhost:27017/admin?%s" $uriAuth $uriTlsArgs -}} + {{- end -}} +{{- end -}} + + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret object should be created +*/}} +{{- define "mongodb.createTlsSecret" -}} +{{- if and .Values.tls.enabled (not .Values.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing MongoDB® TLS certificates +*/}} +{{- define "mongodb.tlsSecretName" -}} +{{- $secretName := .Values.tls.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-ca" (include "mongodb.fullname" .) 
-}} +{{- end -}} +{{- end -}} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/configmap.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/configmap.yaml new file mode 100644 index 00000000..1aacbd79 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mongodb.arbiter.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ print "%s-arbiter" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/headless-svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/headless-svc.yaml new file mode 100644 index 00000000..2bc3658c --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/headless-svc.yaml @@ -0,0 +1,33 @@ +{{- if (include "mongodb.arbiter.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.arbiter.service.nameOverride" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.arbiter.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.arbiter.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.arbiter.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-mongodb + port: {{ .Values.arbiter.service.ports.mongodb }} + targetPort: mongodb + {{- if .Values.arbiter.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: arbiter +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/pdb.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/pdb.yaml new file mode 100644 index 00000000..6402f682 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/pdb.yaml @@ -0,0 +1,25 @@ +{{- if and (include "mongodb.arbiter.enabled" .) .Values.arbiter.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ printf "%s-arbiter" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.arbiter.pdb.minAvailable }} + minAvailable: {{ .Values.arbiter.pdb.minAvailable }} + {{- end }} + {{- if .Values.arbiter.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.arbiter.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: arbiter +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/statefulset.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/statefulset.yaml new file mode 100644 index 00000000..a54b3575 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/arbiter/statefulset.yaml @@ -0,0 +1,279 @@ +{{- if (include "mongodb.arbiter.enabled" .) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-arbiter" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.arbiter.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.arbiter.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.arbiter.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.arbiter.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + serviceName: {{ include "mongodb.arbiter.service.nameOverride" . }} + podManagementPolicy: {{ .Values.arbiter.podManagementPolicy }} + {{- if .Values.arbiter.updateStrategy }} + updateStrategy: {{- toYaml .Values.arbiter.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: arbiter + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: arbiter + {{- if .Values.arbiter.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "mongodb.arbiter.createConfigmap" .) .Values.arbiter.podAnnotations }} + annotations: + {{- if (include "mongodb.arbiter.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/arbiter/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.arbiter.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.arbiter.schedulerName }} + schedulerName: {{ .Values.arbiter.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.arbiter.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.arbiter.podAffinityPreset "component" "arbiter" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.arbiter.podAntiAffinityPreset "component" "arbiter" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.arbiter.nodeAffinityPreset.type "key" .Values.arbiter.nodeAffinityPreset.key "values" .Values.arbiter.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.arbiter.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.priorityClassName }} + priorityClassName: {{ .Values.arbiter.priorityClassName }} + {{- end }} + {{- if .Values.arbiter.runtimeClassName }} + runtimeClassName: {{ .Values.arbiter.runtimeClassName }} + {{- end }} + {{- if .Values.arbiter.podSecurityContext.enabled }} + securityContext: {{- omit 
.Values.arbiter.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.arbiter.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.arbiter.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if .Values.arbiter.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: generate-client + image: {{ include "mongodb.tls.image" . }} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - name: certs-volume + mountPath: /certs/CAs + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /drycc/scripts + command: + - /drycc/scripts/generate-certs.sh + args: + - -s {{ include "mongodb.arbiter.service.nameOverride" . }} + {{- end }} + containers: + - name: mongodb-arbiter + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.arbiter.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.arbiter.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.arbiter.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.arbiter.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.arbiter.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.arbiter.service.nameOverride" . }}" + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-0.%s.$(MY_POD_NAMESPACE).svc.%s" (include "mongodb.fullname" .) (include "mongodb.service.nameOverride" .) 
.Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: MONGODB_PORT_NUMBER + value: {{ .Values.arbiter.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-replica-set-key + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- $extraFlags := .Values.arbiter.extraFlags | join " " -}} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.arbiter.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.arbiter.extraEnvVarsCM .Values.arbiter.extraEnvVarsSecret }} + envFrom: + {{- if .Values.arbiter.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.arbiter.extraEnvVarsCM . 
| quote }} + {{- end }} + {{- if .Values.arbiter.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.arbiter.extraEnvVarsSecret . | quote }} + {{- end }} + {{- end }} + ports: + - containerPort: {{ .Values.arbiter.containerPorts.mongodb }} + name: mongodb + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.arbiter.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.arbiter.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.arbiter.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: mongodb + {{- end }} + {{- if .Values.arbiter.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.arbiter.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.arbiter.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: mongodb + {{- end }} + {{- if .Values.arbiter.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.arbiter.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.arbiter.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: mongodb + {{- end }} + {{- end }} + {{- if .Values.arbiter.resources }} + resources: {{- toYaml .Values.arbiter.resources | nindent 12 }} + {{- end }} + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumeMounts .Values.tls.enabled }} + volumeMounts: + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }} + - name: config + mountPath: 
/opt/drycc/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.arbiter.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.arbiter.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumes .Values.tls.enabled }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0555 + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.arbiter.configmapName" . }} + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: certs + emptyDir: {} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . }} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- end }} + {{- if .Values.arbiter.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/common-scripts-cm.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/common-scripts-cm.yaml new file mode 100644 index 00000000..4493f7db --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/common-scripts-cm.yaml @@ -0,0 +1,104 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + startup-probe.sh: | + #!/bin/bash + {{- if .Values.tls.enabled }} + TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert' + {{- end }} + mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true$' + readiness-probe.sh: | + #!/bin/bash + {{- if .Values.tls.enabled }} + TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert' + {{- end }} + # Run the proper check depending on the version + [[ $(mongod -version | grep "db version") =~ ([0-9]+\.[0-9]+\.[0-9]+) ]] && VERSION=${BASH_REMATCH[1]} + . 
/opt/drycc/scripts/libversion.sh + VERSION_MAJOR="$(get_sematic_version "$VERSION" 1)" + VERSION_MINOR="$(get_sematic_version "$VERSION" 2)" + VERSION_PATCH="$(get_sematic_version "$VERSION" 3)" + if [[ ( "$VERSION_MAJOR" -ge 5 ) || ( "$VERSION_MAJOR" -ge 4 && "$VERSION_MINOR" -ge 4 && "$VERSION_PATCH" -ge 2 ) ]]; then + mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true$' + else + mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.isMaster().ismaster || db.isMaster().secondary' | grep -q 'true$' + fi + ping-mongodb.sh: | + #!/bin/bash + {{- if .Values.tls.enabled }} + TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert' + {{- end }} + mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval "db.adminCommand('ping')" + {{- if .Values.tls.enabled }} + generate-certs.sh: | + #!/bin/bash + additional_ips=() + additional_names=() + while getopts "i:n:s:" flag + do + case "${flag}" in + i) read -a additional_ips <<< ${OPTARG//,/ } ;; + n) read -a additional_names <<< ${OPTARG//,/ } ;; + s) svc=${OPTARG// /} ;; + \?) exit 1 ;; + esac + done + + my_hostname=$(hostname) + cp /certs/CAs/* /certs/ + cat >/certs/openssl.cnf <>/certs/openssl.cnf <>/certs/openssl.cnf < /certs/mongodb.pem + cd /certs/ + shopt -s extglob + rm -rf !(mongodb-ca-cert|mongodb.pem|CAs|openssl.cnf) + chmod 0600 mongodb-ca-cert mongodb.pem + {{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/configmap.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/configmap.yaml new file mode 100644 index 00000000..76608c4e --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mongodb.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/extra-list.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/extra-list.yaml new file mode 100644 index 00000000..9ac65f9e --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/configmap.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/configmap.yaml new file mode 100644 index 00000000..d7271f05 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/configmap.yaml @@ -0,0 +1,15 @@ +{{- if (include "mongodb.hidden.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-hidden" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: hidden + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/external-access-svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/external-access-svc.yaml new file mode 100644 index 00000000..d9bbdc8e --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/external-access-svc.yaml @@ -0,0 +1,67 @@ +{{- if and (include "mongodb.hidden.enabled" .) .Values.externalAccess.hidden.enabled }} +{{- $fullName := include "mongodb.fullname" . }} +{{- $replicaCount := .Values.hidden.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-hidden-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-hidden-%d-external" $fullName $i }} + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if $root.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + pod: {{ $targetPod }} + {{- if or $root.Values.externalAccess.hidden.service.annotations $root.Values.commonAnnotations }} + annotations: + {{- if $root.Values.externalAccess.hidden.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.hidden.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- 
end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.hidden.service.type }} + {{- if eq $root.Values.externalAccess.hidden.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.hidden.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.hidden.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.hidden.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.hidden.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq $root.Values.externalAccess.hidden.service.type "LoadBalancer") (eq $root.Values.externalAccess.hidden.service.type "NodePort")) }} + externalTrafficPolicy: {{ $root.Values.externalAccess.hidden.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if $root.Values.externalAccess.hidden.service.sessionAffinity }} + sessionAffinity: {{ $root.Values.externalAccess.hidden.service.sessionAffinity }} + {{- end }} + {{- if $root.Values.externalAccess.hidden.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.hidden.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ $root.Values.externalAccess.hidden.service.portName | quote }} + port: {{ $root.Values.externalAccess.hidden.service.ports.mongodb }} + {{- if not (empty $root.Values.externalAccess.hidden.service.nodePorts) }} + {{- $nodePort := index $root.Values.externalAccess.hidden.service.nodePorts $i }} + nodePort: {{ $nodePort }} + {{- else }} + nodePort: null + {{- end }} + targetPort: mongodb + {{- if $root.Values.externalAccess.hidden.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.hidden.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 
4 }} + app.kubernetes.io/component: hidden + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/headless-svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/headless-svc.yaml new file mode 100644 index 00000000..725e0256 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/headless-svc.yaml @@ -0,0 +1,34 @@ +{{- if (include "mongodb.hidden.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-hidden-headless" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.hidden.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.hidden.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: {{ .Values.hidden.service.portName | quote }} + port: {{ .Values.hidden.service.ports.mongodb }} + targetPort: mongodb + {{- if .Values.hidden.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: hidden +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/pdb.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/pdb.yaml new file mode 100644 index 00000000..ce233db3 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/pdb.yaml @@ -0,0 +1,22 @@ +{{- if and (include "mongodb.hidden.enabled" .) .Values.hidden.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ printf "%s-hidden" (include "mongodb.fullname" . )}} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.hidden.pdb.minAvailable }} + minAvailable: {{ .Values.hidden.pdb.minAvailable }} + {{- end }} + {{- if .Values.hidden.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.hidden.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: hidden +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/statefulset.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/statefulset.yaml new file mode 100644 index 00000000..9373d937 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/hidden/statefulset.yaml @@ -0,0 +1,533 @@ +{{- if (include "mongodb.hidden.enabled" .) 
}} +{{- $replicaCount := int .Values.hidden.replicaCount }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.hidden.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.hidden.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.hidden.service.type "LoadBalancer")) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-hidden" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if .Values.hidden.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.hidden.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ printf "%s-hidden-headless" (include "mongodb.fullname" .) }} + podManagementPolicy: {{ .Values.hidden.podManagementPolicy }} + replicas: {{ .Values.hidden.replicaCount }} + {{- if .Values.hidden.updateStrategy }} + updateStrategy: {{- toYaml .Values.hidden.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: hidden + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: hidden + {{- if .Values.hidden.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "mongodb.hidden.createConfigmap" .) 
.Values.hidden.podAnnotations }} + annotations: + {{- if (include "mongodb.hidden.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/hidden/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.hidden.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hidden.schedulerName }} + schedulerName: {{ .Values.hidden.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.hidden.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.hidden.podAffinityPreset "component" "" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.hidden.podAntiAffinityPreset "component" "" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.hidden.nodeAffinityPreset.type "key" .Values.hidden.nodeAffinityPreset.key "values" .Values.hidden.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hidden.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" 
(dict "value" .Values.hidden.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.priorityClassName }} + priorityClassName: {{ .Values.hidden.priorityClassName }} + {{- end }} + {{- if .Values.hidden.runtimeClassName }} + runtimeClassName: {{ .Values.hidden.runtimeClassName }} + {{- end }} + {{- if .Values.hidden.podSecurityContext.enabled }} + securityContext: {{- omit .Values.hidden.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.hidden.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.hidden.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.hidden.initContainers (and .Values.volumePermissions.enabled .Values.hidden.persistence.enabled) (and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.tls.enabled }} + initContainers: + {{- if .Values.hidden.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.hidden.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ printf "%s/%s" .Values.hidden.persistence.mountPath (default "" .Values.hidden.persistence.subPath) }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ printf "%s/%s" .Values.hidden.persistence.mountPath (default "" .Values.hidden.persistence.subPath) }} + find {{ printf "%s/%s" .Values.hidden.persistence.mountPath (default "" .Values.hidden.persistence.subPath) }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.hidden.persistence.mountPath }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: generate-tls-certs + image: {{ include "mongodb.tls.image" . }} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - name: certs-volume + mountPath: /certs/CAs + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /drycc/scripts + command: + - /drycc/scripts/generate-certs.sh + args: + - -s {{ printf "%s-hidden-headless" (include "mongodb.fullname" .) 
}} + {{- if .Values.externalAccess.hidden.service.loadBalancerIPs }} + - -i {{ join "," .Values.externalAccess.hidden.service.loadBalancerIPs }} + {{- end }} + {{- if .Values.tls.extraDnsNames }} + - -n {{ join "," .Values.tls.extraDnsNames }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: auto-discovery + image: {{ include "mongodb.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.hidden.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.hidden.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.hidden.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.command "context" $) | nindent 12 }} + {{- else }} + command: + - /scripts/setup-hidden.sh + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.hidden.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.hidden.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.service.nameOverride" . }}" + - name: K8S_HIDDEN_NODE_SERVICE_NAME + value: "{{ include "mongodb.fullname" . 
}}-hidden-headless" + - name: MONGODB_REPLICA_SET_MODE + value: "hidden" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.%s" (include "mongodb.fullname" .) .Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + {{- if and .Values.replicaSetHostnames (not .Values.externalAccess.hidden.enabled) }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_HIDDEN_NODE_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- $customUsers := include "mongodb.customUsers" . -}} + {{- $customDatabases := include "mongodb.customDatabases" . -}} + {{- if not (empty $customUsers) }} + - name: MONGODB_EXTRA_USERNAMES + value: {{ $customUsers | quote }} + {{- end }} + {{- if not (empty $customDatabases) }} + - name: MONGODB_EXTRA_DATABASES + value: {{ $customDatabases | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} + - name: MONGODB_EXTRA_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-passwords + {{- end }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-replica-set-key + {{- end }} + {{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-metrics-password + {{- end }} + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_DISABLE_JAVASCRIPT + value: {{ ternary "yes" "no" .Values.disableJavascript | quote }} + - name: MONGODB_ENABLE_JOURNAL + value: {{ ternary "yes" "no" .Values.enableJournal | quote }} + - name: MONGODB_PORT_NUMBER + value: {{ .Values.hidden.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- $extraFlags := .Values.hidden.extraFlags | join " " -}} + {{- if .Values.tls.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.hidden.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.hidden.extraEnvVarsCM .Values.hidden.extraEnvVarsSecret }} + envFrom: + {{- if .Values.hidden.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.hidden.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.hidden.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.hidden.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - containerPort: {{ .Values.hidden.containerPorts.mongodb }} + name: mongodb + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.hidden.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.hidden.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.hidden.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/ping-mongodb.sh + {{- end }} + {{- if .Values.hidden.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.hidden.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.hidden.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/ping-mongodb.sh + {{- end }} + {{- if .Values.hidden.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.hidden.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.hidden.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/startup-probe.sh + {{- end }} + {{- end }} + {{- if .Values.hidden.resources }} + resources: {{- toYaml .Values.hidden.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.hidden.persistence.mountPath }} + subPath: {{ .Values.hidden.persistence.subPath }} + - name: common-scripts + mountPath: /drycc/scripts + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or 
.Values.hidden.configuration .Values.hidden.existingConfigmap }} + - name: config + mountPath: /opt/drycc/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + - name: scripts + mountPath: /scripts/setup-hidden.sh + subPath: setup-hidden.sh + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: shared + mountPath: /shared + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.hidden.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + - | + mongodb_exporter --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . 
}}" {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + {{- if not .Values.metrics.username }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- else }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-metrics-password + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + ports: + - name: metrics + containerPort: 9216 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit 
.Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.hidden.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0555 + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.hidden.configuration .Values.hidden.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.hidden.configmapName" . }} + {{- end }} + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: shared + emptyDir: {} + {{- end }} + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0755 + {{- if .Values.hidden.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + emptyDir: {} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . 
}} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- end }} + {{- if not .Values.hidden.persistence.enabled }} + - name: datadir + {{- if .Values.hidden.persistence.medium }} + emptyDir: + medium: {{ .Values.hidden.persistence.medium | quote }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + {{- if .Values.hidden.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.hidden.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.hidden.persistence.size | quote }} + {{- if .Values.hidden.persistence.volumeClaimTemplates.requests }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.requests "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.hidden.persistence.volumeClaimTemplates.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.hidden.persistence.volumeClaimTemplates.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.hidden.persistence "global" .Values.global) }} + {{- end }} +{{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/initialization-configmap.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/initialization-configmap.yaml new file mode 100644 index 00000000..f3d023ab --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/initialization-configmap.yaml @@ -0,0 +1,17 
@@ +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/metrics-svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/metrics-svc.yaml new file mode 100644 index 00000000..2a36dfc8 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/metrics-svc.yaml @@ -0,0 +1,33 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - port: {{ .Values.metrics.service.ports.metrics }} + targetPort: metrics + protocol: TCP + name: http-metrics + {{- if .Values.metrics.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/networkpolicy.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/networkpolicy.yaml new file mode 100644 index 00000000..28b22388 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/networkpolicy.yaml @@ -0,0 +1,45 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if eq .Values.service.type "ClusterIP" }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.service.ports.mongodb }} + - port: {{ .Values.metrics.service.ports.metrics }} + {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} + from: + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + ingress: + - {} + {{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/prometheusrule.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/prometheusrule.yaml new file mode 100644 index 00000000..29d2ea46 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/prometheusrule.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.prometheusRule.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "mongodb.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/psp.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/psp.yaml new file mode 100644 index 00000000..e9ef023b --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/psp.yaml @@ -0,0 +1,50 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.podSecurityPolicy.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- if .Values.podSecurityPolicy.spec }} +{{ include "common.tplvalues.render" ( dict "value" .Values.podSecurityPolicy.spec "context" $ ) | nindent 2 }} +{{- else }} + allowPrivilegeEscalation: {{ .Values.podSecurityPolicy.allowPrivilegeEscalation }} + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.podSecurityContext.fsGroup }} + max: {{ .Values.podSecurityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: {{ .Values.podSecurityPolicy.privileged }} + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/access-svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/access-svc.yaml new file mode 100644 index 00000000..caa15f36 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/access-svc.yaml @@ -0,0 +1,32 @@ +{{- if (eq .Values.architecture "replicaset") }} +{{- $fullName := include "mongodb.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . 
}} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d" $fullName $i }} + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if $root.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + pod: {{ $targetPod }} +spec: + type: {{ $root.Values.service.type }} + publishNotReadyAddresses: true + ports: + - name: {{ $root.Values.service.portName | quote }} + port: {{ $root.Values.service.ports.mongodb }} + targetPort: mongodb + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/external-access-svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/external-access-svc.yaml new file mode 100644 index 00000000..f1acd6bf --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/external-access-svc.yaml @@ -0,0 +1,67 @@ +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not (eq .Values.externalAccess.service.type "ClusterIP")) false}} +{{- $fullName := include "mongodb.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . 
}} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d-external" $fullName $i }} + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if $root.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + pod: {{ $targetPod }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + annotations: + {{- if $root.Values.externalAccess.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq $root.Values.externalAccess.service.type "LoadBalancer") (eq $root.Values.externalAccess.service.type "NodePort")) }} + externalTrafficPolicy: {{ $root.Values.externalAccess.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if $root.Values.externalAccess.service.sessionAffinity }} + sessionAffinity: {{ $root.Values.externalAccess.service.sessionAffinity }} + {{- end 
}} + {{- if $root.Values.externalAccess.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ $root.Values.externalAccess.service.portName | quote }} + port: {{ $root.Values.externalAccess.service.ports.mongodb }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + {{- $nodePort := index $root.Values.externalAccess.service.nodePorts $i }} + nodePort: {{ $nodePort }} + {{- else }} + nodePort: null + {{- end }} + targetPort: mongodb + {{- if $root.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/headless-svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/headless-svc.yaml new file mode 100644 index 00000000..78f26ab9 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/headless-svc.yaml @@ -0,0 +1,34 @@ +{{- if eq .Values.architecture "replicaset" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.service.nameOverride" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: {{ .Values.service.portName | quote }} + port: {{ .Values.service.ports.mongodb }} + targetPort: mongodb + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/pdb.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/pdb.yaml new file mode 100644 index 00000000..a2b6492f --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/pdb.yaml @@ -0,0 +1,25 @@ +{{- if and (eq .Values.architecture "replicaset") .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/scripts-configmap.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/scripts-configmap.yaml new file mode 100644 index 00000000..ed5a8627 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/scripts-configmap.yaml @@ -0,0 +1,301 @@ +{{- if eq .Values.architecture "replicaset" }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "mongodb.fullname" . }} + {{- $releaseNamespace := include "mongodb.namespace" . 
}} + {{- if and .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- end }} + setup.sh: |- + #!/bin/bash + + . /opt/drycc/scripts/mongodb-env.sh + . /opt/drycc/scripts/libfs.sh + . /opt/drycc/scripts/liblog.sh + . 
/opt/drycc/scripts/libvalidations.sh + + {{- if .Values.externalAccess.enabled }} + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export MONGODB_ADVERTISED_HOSTNAME="$(<${SHARED_FILE})" + {{- else }} + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + export MONGODB_ADVERTISED_HOSTNAME=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + export MONGODB_ADVERTISED_PORT_NUMBER=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- if .Values.externalAccess.service.domain }} + export MONGODB_ADVERTISED_HOSTNAME={{ .Values.externalAccess.service.domain }} + {{- else }} + export MONGODB_ADVERTISED_HOSTNAME=$MY_POD_HOST_IP + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.replicaSetConfigurationSettings.enabled }} + # placed here before root password env is overwritten + # makes no assumption about starting state + # ensures that any stepDown or non-default starting state is handled + /scripts/replicaSetConfigurationSettings.sh & + {{- end }} + + if is_empty_value "$MONGODB_ADVERTISED_PORT_NUMBER"; then + export MONGODB_ADVERTISED_PORT_NUMBER="$MONGODB_PORT_NUMBER" + fi + + info "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME" + info "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER" + + # Check for existing replica set in case there is no data in the PVC + # This is for cases where the PVC is lost or for MongoDB caches without + # persistence + current_primary="" + if is_dir_empty "${MONGODB_DATA_DIR}/db"; then + info "Data dir empty, checking if the replica set already exists" + {{- $replicaCount := int .Values.replicaCount }} + {{- $portNumber := int .Values.service.ports.mongodb }} + {{- $fullname := include "mongodb.fullname" . 
}} + {{- $releaseNamespace := include "mongodb.namespace" . }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} + {{- $mongoList := list }} + {{- range $e, $i := until $replicaCount }} + {{- $mongoList = append $mongoList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $portNumber) }} + {{- end }} + current_primary=$(mongosh admin --host "{{ join "," $mongoList }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }}{{- if .Values.tls.enabled}} --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert{{- end }} --eval 'db.runCommand("ismaster")' | awk -F\' '/primary/ {print $2}') + + if ! is_empty_value "$current_primary"; then + info "Detected existing primary: ${current_primary}" + fi + fi + + if ! is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" == "$current_primary" ]]; then + info "Advertised name matches current primary, configuring node as a primary" + export MONGODB_REPLICA_SET_MODE="primary" + elif ! is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" != "$current_primary" ]]; then + info "Current primary is different from this node. 
Configuring the node as replica of ${current_primary}" + export MONGODB_REPLICA_SET_MODE="secondary" + export MONGODB_INITIAL_PRIMARY_HOST="${current_primary%:*}" + export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="${current_primary#*:}" + export MONGODB_SET_SECONDARY_OK="yes" + elif [[ "$MY_POD_NAME" = "{{ $fullname }}-0" ]]; then + info "Pod name matches initial primary pod name, configuring node as a primary" + export MONGODB_REPLICA_SET_MODE="primary" + else + info "Pod name doesn't match initial primary pod name, configuring node as a secondary" + export MONGODB_REPLICA_SET_MODE="secondary" + export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER" + fi + + if [[ "$MONGODB_REPLICA_SET_MODE" == "secondary" ]]; then + export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER" + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD" + export MONGODB_ROOT_PASSWORD="" + export MONGODB_EXTRA_USERNAMES="" + export MONGODB_EXTRA_DATABASES="" + export MONGODB_EXTRA_PASSWORDS="" + export MONGODB_ROOT_PASSWORD_FILE="" + export MONGODB_EXTRA_USERNAMES_FILE="" + export MONGODB_EXTRA_DATABASES_FILE="" + export MONGODB_EXTRA_PASSWORDS_FILE="" + fi + + exec /opt/drycc/scripts/mongodb/entrypoint.sh /opt/drycc/scripts/mongodb/run.sh + setup-hidden.sh: |- + #!/bin/bash + + . 
/opt/drycc/scripts/mongodb-env.sh + + {{- if .Values.externalAccess.hidden.enabled }} + {{- if eq .Values.externalAccess.hidden.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export MONGODB_ADVERTISED_HOSTNAME="$(<${SHARED_FILE})" + {{- else }} + ID="${MY_POD_NAME#"{{ $fullname }}-hidden-"}" + export MONGODB_ADVERTISED_HOSTNAME=$(echo '{{ .Values.externalAccess.hidden.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- else if eq .Values.externalAccess.hidden.service.type "NodePort" }} + ID="${MY_POD_NAME#"{{ $fullname }}-hidden-"}" + export MONGODB_ADVERTISED_PORT_NUMBER=$(echo '{{ .Values.externalAccess.hidden.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- if .Values.externalAccess.hidden.service.domain }} + export MONGODB_ADVERTISED_HOSTNAME={{ .Values.externalAccess.hidden.service.domain }} + {{- else }} + export MONGODB_ADVERTISED_HOSTNAME=$MY_POD_HOST_IP + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.replicaSetConfigurationSettings.enabled }} + # placed here before root password env is overwritten + # makes no assumption about starting state + # ensures that any stepDown or non-default starting state is handled + /scripts/replicaSetConfigurationSettings.sh & + {{- end }} + + echo "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME" + echo "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER" + echo "Configuring node as a hidden node" + export MONGODB_REPLICA_SET_MODE="hidden" + export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER" + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD" + export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER" + export MONGODB_ROOT_PASSWORD="" + export MONGODB_EXTRA_USERNAMES="" + export MONGODB_EXTRA_DATABASES="" + export MONGODB_EXTRA_PASSWORDS="" + export MONGODB_ROOT_PASSWORD_FILE="" + export MONGODB_EXTRA_USERNAMES_FILE="" + export MONGODB_EXTRA_DATABASES_FILE="" + export 
MONGODB_EXTRA_PASSWORDS_FILE="" + exec /opt/drycc/scripts/mongodb/entrypoint.sh /opt/drycc/scripts/mongodb/run.sh + {{- if .Values.replicaSetConfigurationSettings.enabled }} + replicaSetConfigurationSettings.sh: |- + #!/bin/bash + # This script to be called when pod starts. + # This script sets rs settings which can not be applied via conf file + + function logger () + #$1 is the line to be logged + { + echo "replicaSetConfigurationSettings.sh -- ${1}" >&1 + } + + SLEEP_PERIOD=10 + + {{- if and .Values.auth.enabled .Values.auth.rootPassword }} + usernameAndPassword="-u root -p ${MONGODB_ROOT_PASSWORD}" + {{- else }} + usernameAndPassword="" + {{- end }} + + # load Values.replicaSetConfigurationSettings.configuration into associtive array which makes iterating and string manipulation easy + declare -A desiredRsConf + {{ range $setting, $value := .Values.replicaSetConfigurationSettings.configuration -}} + {{ printf "desiredRsConf[%s]='%v'" $setting $value }} + {{ end }} + + rsConfWriteAttempts=0 + rs_conf_configured_ok=unknown + + while [[ "${rs_conf_configured_ok}" != "true" ]]; do + + # give the rs setup a chance to succeed before attempting to read or configure + sleep ${SLEEP_PERIOD} + + counter=0 + while ! mongosh ${usernameAndPassword} --eval 'rs.conf()'; do + counter=$((${counter} +1)) + logger "not yet able to read rs.conf settings from the currently running rs (after ${counter} attempts)" + sleep ${SLEEP_PERIOD} + done + counter=$((${counter} +1)) + logger "rs.conf settings have been read from the currently running rs (after ${counter} attempts)" + + # read rs.conf again and store it. settings format is '"" : ,' + currentRsConf=$(mongosh ${usernameAndPassword} --eval 'rs.conf()') + + desiredEqualsactual=unknown + settingsToConfigure="" + for key in ${!desiredRsConf[@]}; do + value=${desiredRsConf[$key]} + if ! 
$(echo "\"${currentRsConf}"\" | grep -q -e "${key}: ${value},"); then + logger "rs conf setting: ${key} value will be set to: ${value}" + settingsToConfigure="${settingsToConfigure}cfg.settings.${key} = ${value}; " + desiredEqualsactual=false + else + logger "rs conf: ${key} is already at desired value: ${value}" + fi + done + + if [[ "${desiredEqualsactual}" != "false" ]]; then + logger "replicaSetConfigurationSettings match the settings of the currently running rs" + desiredEqualsactual=true + rs_conf_configured_ok=true + logger "Current settings match desired settings (There have been ${rsConfWriteAttempts} attempts to write to mongoDB rs configuration)" + exit + fi + + # apply the settings only if this member is currently the mongo replicaset PRIMARY + # it might take a little time before any pod is PRIMARY + isMaster=unknown + if ! mongosh ${usernameAndPassword} --eval 'rs.isMaster()' | grep -q "ismaster: true"; then + isMaster=false + logger "This node is not yet PRIMARY - replicaSetConfigurationSettings will only be set on the member that is currently PRIMARY" + else + isMaster=true + logger "This node is PRIMARY" + fi + + if [[ "${isMaster}" == "true" ]]; then + logger "This node is currently PRIMARY - will apply rs.conf settings" + + # avoiding tricky string substitution with single quotes by making the eval string a set of vars + rsconf="cfg = rs.conf();" + rsreconf="rs.reconfig(cfg);" + rsCommand="${rsconf} ${settingsToConfigure} ${rsreconf}" + + mongosh ${usernameAndPassword} --eval "${rsCommand}" + if [ $? 
-ne 0 ]; then + logger "Failed to apply mongodb cfg.settings configuration" + else + logger "mongodb replicaset cfg.settings configuration applied" + logger "Will check rs conf" + # don't exit just yet - the settings will be checked in the next loop + fi + rsConfWriteAttempts=$((${rsConfWriteAttempts} + 1 )) + fi + done + {{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/statefulset.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/statefulset.yaml new file mode 100644 index 00000000..eddaae09 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/statefulset.yaml @@ -0,0 +1,543 @@ +{{- if eq .Values.architecture "replicaset" }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or .Values.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + serviceName: {{ include "mongodb.service.nameOverride" . }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: Delete + whenScaled: Retain + replicas: {{ .Values.replicaCount }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: mongodb + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: mongodb + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "mongodb.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "mongodb" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "mongodb" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.runtimeClassName }} + runtimeClassName: {{ .Values.runtimeClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ 
.Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.tls.enabled }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + find {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: generate-tls-certs + image: {{ include "mongodb.tls.image" . 
}} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - name: certs-volume + mountPath: /certs/CAs + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /drycc/scripts + command: + - /drycc/scripts/generate-certs.sh + args: + - -s {{ include "mongodb.service.nameOverride" . }} + {{- if .Values.externalAccess.service.loadBalancerIPs }} + - -i {{ join "," .Values.externalAccess.service.loadBalancerIPs }} + {{- end }} + {{- if .Values.tls.extraDnsNames }} + - -n {{ join "," .Values.tls.extraDnsNames }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: auto-discovery + image: {{ include "mongodb.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- else }} + command: + - /scripts/setup.sh + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.service.nameOverride" . }}" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.%s" (include "mongodb.fullname" .) 
.Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + {{- if and .Values.replicaSetHostnames (not .Values.externalAccess.enabled) }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- $customUsers := include "mongodb.customUsers" . -}} + {{- $customDatabases := include "mongodb.customDatabases" . -}} + {{- if not (empty $customUsers) }} + - name: MONGODB_EXTRA_USERNAMES + value: {{ $customUsers | quote }} + {{- end }} + {{- if not (empty $customDatabases) }} + - name: MONGODB_EXTRA_DATABASES + value: {{ $customDatabases | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} + - name: MONGODB_EXTRA_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-passwords + {{- end }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-replica-set-key + {{- end }} + {{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-metrics-password + {{- end }} + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_DISABLE_JAVASCRIPT + value: {{ ternary "yes" "no" .Values.disableJavascript | quote }} + - name: MONGODB_ENABLE_JOURNAL + value: {{ ternary "yes" "no" .Values.enableJournal | quote }} + - name: MONGODB_PORT_NUMBER + value: {{ .Values.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- $extraFlags := .Values.extraFlags | join " " -}} + {{- if .Values.tls.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - name: mongodb + containerPort: {{ .Values.containerPorts.mongodb }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/ping-mongodb.sh + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/readiness-probe.sh + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/startup-probe.sh + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + - name: common-scripts + mountPath: /drycc/scripts + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/drycc/mongodb/conf/mongodb.conf + 
subPath: mongodb.conf + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{ if .Values.replicaSetConfigurationSettings.enabled }} + - name: scripts + mountPath: /scripts/replicaSetConfigurationSettings.sh + subPath: replicaSetConfigurationSettings.sh + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: shared + mountPath: /shared + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + - | + mongodb_exporter --collector.replicasetstatus --collector.dbstats --collector.indexstats --collector.collstats --compatible-mode --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ 
include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + {{- if not .Values.metrics.username }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- else }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-metrics-password + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + 
startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0550 + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.configmapName" . }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: shared + emptyDir: {} + {{- end }} + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0755 + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + emptyDir: {} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . 
}} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: datadir + {{- if .Values.persistence.medium }} + emptyDir: + medium: {{ .Values.persistence.medium | quote }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.volumeClaimTemplates.requests }} + {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.requests "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.persistence.volumeClaimTemplates.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.volumeClaimTemplates.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- end }} +{{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/svc.yaml new file mode 100644 index 00000000..55c56b87 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/replicaset/svc.yaml @@ -0,0 +1,43 @@ +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled }} + +{{- $fullName := include 
"mongodb.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d" $fullName $i }} + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if $root.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $root.Values.service.annotations $root.Values.commonAnnotations }} + annotations: + {{- if $root.Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + ports: + - name: {{ $root.Values.service.portName | quote }} + port: {{ $root.Values.service.ports.mongodb }} + targetPort: mongodb + {{- if $root.Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: mongodb + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/role.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/role.yaml new file mode 100644 index 00000000..56300431 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/role.yaml @@ -0,0 +1,30 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include 
"common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- if .Values.rbac.rules }} +{{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} +{{- end -}} +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "mongodb.fullname" . }}] +{{- end -}} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/rolebinding.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/rolebinding.yaml new file mode 100644 index 00000000..8950f8bb --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "mongodb.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "mongodb.serviceAccountName" . }} + namespace: {{ include "mongodb.namespace" . 
| quote }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets-ca.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets-ca.yaml new file mode 100644 index 00000000..f054e159 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets-ca.yaml @@ -0,0 +1,37 @@ +{{- if (include "mongodb.createTlsSecret" .) }} +{{- $fullname := include "mongodb.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $cn := printf "%s.%s.svc.%s" $fullname .Release.Namespace $clusterDomain }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "mongodb.tlsSecretName" . }} + namespace: {{ template "mongodb.namespace" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if or .Values.tls.caCert .Values.tls.caKey (not .Values.tls.autoGenerated) }} + {{- $ca := buildCustomCert (required "A valid .Values.tls.caCert is required!" .Values.tls.caCert) (required "A valid .Values.tls.caKey is required!" 
.Values.tls.caKey) }} + {{- $cert := genSignedCert $cn nil nil 36500 $ca }} + {{- $pem := printf "%s%s" $cert.Cert $cert.Key }} + mongodb-ca-cert: {{ b64enc $ca.Cert }} + mongodb-ca-key: {{ b64enc $ca.Key }} + client-pem: {{ b64enc $pem }} + {{- else }} + {{- $ca:= genCA "myMongo-ca" 36500 }} + {{- $cert := genSignedCert $cn nil nil 36500 $ca }} + {{- $pem := printf "%s%s" $cert.Cert $cert.Key }} + mongodb-ca-cert: {{ b64enc $ca.Cert }} + mongodb-ca-key: {{ b64enc $ca.Key }} + client-pem: {{ b64enc $pem }} + {{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets.yaml new file mode 100644 index 00000000..acf8c483 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/secrets.yaml @@ -0,0 +1,41 @@ +{{- if (include "mongodb.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ template "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + mongodb-root-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.fullname" .) "key" "mongodb-root-password" "providedValues" (list "auth.rootPassword" ) "context" $) }} + {{- $customUsers := include "mongodb.customUsers" . -}} + {{- $customDatabases := include "mongodb.customDatabases" . -}} + {{- $customPasswords := include "mongodb.customPasswords" . 
-}} + {{- $passwordList := list -}} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) -}} + {{- if not (empty $customPasswords) -}} + {{- $passwordList = $customPasswords -}} + {{- else -}} + {{- $customUsersList := splitList "," $customUsers -}} + {{- $customPasswordsList := list -}} + {{- range $customUsersList -}} + {{- $customPasswordsList = append $customPasswordsList (randAlphaNum 10) -}} + {{- end -}} + {{- $passwordList = (join "," $customPasswordsList) -}} + {{- end }} + mongodb-passwords: {{ include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.fullname" .) "key" "mongodb-passwords" "providedValues" (list "mongodbPasswords") "context" (set (deepCopy $) "Values" (dict "mongodbPasswords" $passwordList))) }} + {{- end }} + {{- if .Values.metrics.username }} + mongodb-metrics-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.fullname" .) "key" "mongodb-metrics-password" "providedValues" (list "metrics.password" ) "context" $) }} + {{- end }} + {{- if eq .Values.architecture "replicaset" }} + mongodb-replica-set-key: {{ include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.fullname" .) "key" "mongodb-replica-set-key" "providedValues" (list "auth.replicaSetKey" ) "context" $) }} + {{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/serviceaccount.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/serviceaccount.yaml new file mode 100644 index 00000000..f4aa81a5 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/serviceaccount.yaml @@ -0,0 +1,23 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mongodb.serviceAccountName" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.serviceAccount.annotations }} + {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +secrets: + - name: {{ template "mongodb.fullname" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/servicemonitor.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/servicemonitor.yaml new file mode 100644 index 00000000..0a00f719 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.serviceMonitor.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - "{{ include "mongodb.namespace" . 
}}" +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/dep-sts.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/dep-sts.yaml new file mode 100644 index 00000000..6ef4530c --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/dep-sts.yaml @@ -0,0 +1,474 @@ +{{- if not (eq .Values.architecture "replicaset") }} +apiVersion: {{ if .Values.useStatefulSet }}{{ include "common.capabilities.statefulset.apiVersion" . }}{{- else }}{{ include "common.capabilities.deployment.apiVersion" . }}{{- end }} +kind: {{ if .Values.useStatefulSet }}StatefulSet{{- else }}Deployment{{- end }} +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + replicas: 1 + {{- if .Values.useStatefulSet }} + serviceName: {{ include "mongodb.fullname" . }} + {{- end }} + {{- if .Values.updateStrategy}} + {{- if .Values.useStatefulSet }} + updateStrategy: + {{- else }} + strategy: + {{- end }} + {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end}} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: mongodb + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: mongodb + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "mongodb.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "mongodb" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "mongodb" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.runtimeClassName }} + runtimeClassName: {{ .Values.runtimeClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) .Values.tls.enabled }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled 
.Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + find {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: generate-tls-certs + image: {{ include "mongodb.tls.image" . }} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - name: certs-volume + mountPath: /certs/CAs + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /drycc/scripts + command: + - /drycc/scripts/generate-certs.sh + args: + - -s {{ include "mongodb.service.nameOverride" . 
}} + {{- if .Values.externalAccess.service.loadBalancerIPs }} + - -i {{ join "," .Values.externalAccess.service.loadBalancerIPs }} + {{- end }} + {{- if .Values.tls.extraDnsNames }} + - -n {{ join "," .Values.tls.extraDnsNames }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- $customUsers := include "mongodb.customUsers" . -}} + {{- $customDatabases := include "mongodb.customDatabases" . 
-}} + {{- if not (empty $customUsers) }} + - name: MONGODB_EXTRA_USERNAMES + value: {{ $customUsers | quote }} + {{- end }} + {{- if not (empty $customDatabases) }} + - name: MONGODB_EXTRA_DATABASES + value: {{ $customDatabases | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} + - name: MONGODB_EXTRA_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-passwords + {{- end }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- end }} + {{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-metrics-password + {{- end }} + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_DISABLE_JAVASCRIPT + value: {{ ternary "yes" "no" .Values.disableJavascript | quote }} + - name: MONGODB_ENABLE_JOURNAL + value: {{ ternary "yes" "no" .Values.enableJournal | quote }} + - name: MONGODB_PORT_NUMBER + value: {{ .Values.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- $extraFlags := .Values.extraFlags | join " " -}} + {{- if .Values.tls.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - name: mongodb + containerPort: {{ .Values.containerPorts.mongodb }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/ping-mongodb.sh + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/readiness-probe.sh + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /drycc/scripts/startup-probe.sh + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + - name: common-scripts + mountPath: /drycc/scripts + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or 
.Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/drycc/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + - | + mongodb_exporter --collect-all --compatible-mode --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + {{- if not .Values.metrics.username }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-root-password + {{- else }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-metrics-password + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.sidecars }} + {{- 
include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0550 + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.configmapName" . }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + emptyDir: {} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . }} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: datadir + {{- if .Values.persistence.medium }} + emptyDir: + medium: {{ .Values.persistence.medium | quote }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.persistence.existingClaim }} + - name: datadir + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.useStatefulSet }} + - name: datadir + persistentVolumeClaim: + claimName: {{ template "mongodb.fullname" . }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.volumeClaimTemplates.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- end }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/pvc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/pvc.yaml new file mode 100644 index 00000000..7786de63 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/pvc.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not (eq .Values.architecture "replicaset")) (not .Values.useStatefulSet) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.persistence.annotations .Values.commonAnnotations .Values.persistence.resourcePolicy }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.persistence.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.persistence.resourcePolicy }} + helm.sh/resource-policy: {{ .Values.persistence.resourcePolicy | quote }} + {{- end }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/svc.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/svc.yaml new file mode 100644 index 00000000..44255798 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/templates/standalone/svc.yaml @@ -0,0 +1,58 @@ +{{- if not (eq .Values.architecture "replicaset") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{ toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- 
end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + ports: + - name: {{ .Values.service.portName | quote }} + port: {{ .Values.service.ports.mongodb }} + targetPort: mongodb + {{- if and (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) .Values.service.nodePorts.mongodb }} + nodePort: {{ .Values.service.nodePorts.mongodb }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/values.schema.json b/addons/mongodb/8.0/chart/mongodb-8.0/values.schema.json new file mode 100644 index 00000000..be8e54b4 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/values.schema.json @@ -0,0 +1,173 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "MongoDB® architecture", + "form": true, + "description": "Allowed values: `standalone` or `replicaset`" + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Authentication", + "form": true + }, + "rootUser": { + "type": "string", + "title": "MongoDB® admin user", + "form": true, + "description": "Name of the admin user. Default is root" + }, + "rootPassword": { + "type": "string", + "title": "MongoDB® admin password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "auth/enabled" + } + }, + "database": { + "type": "string", + "title": "MongoDB® custom database", + "description": "Name of the custom database to be created during the 1st initialization of MongoDB®", + "form": true + }, + "username": { + "type": "string", + "title": "MongoDB® custom user", + "description": "Name of the custom user to be created during the 1st initialization of MongoDB®. 
This user only has permissions on the MongoDB® custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for MongoDB® custom user", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "auth/enabled" + } + }, + "replicaSetKey": { + "type": "string", + "title": "Key used for replica set authentication", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of MongoDB® replicas", + "hidden": { + "value": "standalone", + "path": "architecture" + } + }, + "configuration": { + "type": "string", + "title": "MongoDB® Custom Configuration", + "form": true, + "render": "textArea" + }, + "arbiter": { + "type": "object", + "title": "Arbiter configuration", + "form": true, + "properties": { + "configuration": { + "type": "string", + "title": "Arbiter Custom Configuration", + "form": true, + "render": "textArea", + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "hidden": { + "value": false, + "path": "persistence/enabled" + }, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init 
container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/addons/mongodb/8.0/chart/mongodb-8.0/values.yaml b/addons/mongodb/8.0/chart/mongodb-8.0/values.yaml new file mode 100644 index 00000000..fca8e5c7 --- /dev/null +++ b/addons/mongodb/8.0/chart/mongodb-8.0/values.yaml @@ -0,0 +1,2048 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.namespaceOverride Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + namespaceOverride: "" + +## @section Common parameters +## + +## @param nameOverride String to partially override mongodb.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override mongodb.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## extraDeploy: +## This needs to be uncommented and added to 'extraDeploy' in order to use the replicaset 'mongo-labeler' sidecar +## for dynamically discovering the mongodb primary pod +## suggestion is to use a hard-coded and predictable TCP port for the primary mongodb pod (here is 30001, choose your own) +## - apiVersion: v1 +## kind: Service +## metadata: +## name: mongodb-primary +## namespace: the-mongodb-namespace +## labels: +## app.kubernetes.io/component: mongodb +## app.kubernetes.io/instance: mongodb +## app.kubernetes.io/managed-by: Helm +## app.kubernetes.io/name: mongodb +## spec: +## type: NodePort +## externalTrafficPolicy: Cluster +## ports: +## - name: mongodb +## port: 30001 +## nodePort: 30001 +## protocol: TCP +## targetPort: mongodb +## selector: +## app.kubernetes.io/component: mongodb +## app.kubernetes.io/instance: mongodb +## app.kubernetes.io/name: mongodb +## primary: "true" +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} +## @param commonAnnotations Common annotations to add to all Mongo resources (sub-charts are not considered). 
Evaluated as a template +## +commonAnnotations: {} + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MongoDB(®) parameters +## + +## Drycc MongoDB(®) image +## ref: https://hub.docker.com/r/drycc/mongodb/tags/ +## @param image.registry MongoDB(®) image registry +## @param image.repository MongoDB(®) image registry +## @param image.tag MongoDB(®) image tag (immutable tags are recommended) +## @param image.digest MongoDB(®) image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy MongoDB(®) image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: registry.drycc.cc + repository: drycc-addons/mongodb + tag: "8.0" + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## @param schedulerName Name of the scheduler (other than default) to dispatch pods +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param architecture MongoDB(®) architecture (`standalone` or `replicaset`) +## +architecture: replicaset +## @param useStatefulSet Set to true to use a StatefulSet instead of a Deployment (only when `architecture=standalone`) +## +useStatefulSet: false +## MongoDB(®) Authentication parameters +## +auth: + ## @param auth.enabled Enable authentication + ## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ + ## + enabled: true + ## @param auth.rootUser MongoDB(®) root user + ## + rootUser: root + ## @param auth.rootPassword MongoDB(®) root password + ## ref: https://github.com/drycc/containers/tree/main/drycc/mongodb#setting-the-root-user-and-password-on-first-run + ## + rootPassword: "" + ## MongoDB(®) custom users and databases + ## ref: https://github.com/drycc/containers/tree/main/drycc/mongodb#creating-a-user-and-database-on-first-run + ## @param auth.usernames List of custom users to be created during the initialization + ## @param auth.passwords List of passwords for the custom users set at `auth.usernames` + ## @param auth.databases List of custom databases to be created during the initialization + ## + usernames: [] + passwords: [] + databases: [] + ## @param auth.username DEPRECATED: use `auth.usernames` instead + ## @param auth.password DEPRECATED: use `auth.passwords` instead + ## @param auth.database DEPRECATED: use `auth.databases` instead + username: "" + password: "" + database: "" + ## @param auth.replicaSetKey Key used for authentication in the replicaset (only when 
`architecture=replicaset`) + ## + replicaSetKey: "" + ## @param auth.existingSecret Existing secret with MongoDB(®) credentials (keys: `mongodb-passwords`, `mongodb-root-password`, `mongodb-metrics-password`, ` mongodb-replica-set-key`) + ## NOTE: When it's set the previous parameters are ignored. + ## + existingSecret: "" +tls: + ## @param tls.enabled Enable MongoDB(®) TLS support between nodes in the cluster as well as between mongo clients and nodes + ## + enabled: false + ## @param tls.autoGenerated Generate a custom CA and self-signed certificates + ## + autoGenerated: true + ## @param tls.existingSecret Existing secret with TLS certificates (keys: `mongodb-ca-cert`, `mongodb-ca-key`, `client-pem`) + ## NOTE: When it's set it will disable certificate creation + ## + existingSecret: "" + ## Add Custom CA certificate + ## @param tls.caCert Custom CA certificated (base64 encoded) + ## @param tls.caKey CA certificate private key (base64 encoded) + ## + caCert: "" + caKey: "" + ## Drycc Nginx image + ## @param tls.image.registry Init container TLS certs setup image registry + ## @param tls.image.repository Init container TLS certs setup image repository + ## @param tls.image.tag Init container TLS certs setup image tag (immutable tags are recommended) + ## @param tls.image.digest Init container TLS certs setup image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param tls.image.pullPolicy Init container TLS certs setup image pull policy + ## @param tls.image.pullSecrets Init container TLS certs specify docker-registry secret names as an array + ## @param tls.extraDnsNames Add extra dns names to the CA, can solve x509 auth issue for pod clients + ## + image: + registry: docker.io + repository: drycc/nginx + tag: 1.23.1-debian-11-r26 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## e.g: + ## extraDnsNames + ## "DNS.6": "$my_host" + ## "DNS.7": "$test" + ## + extraDnsNames: [] + ## @param tls.mode Allows to set the tls mode which should be used when tls is enabled (options: `allowTLS`, `preferTLS`, `requireTLS`) + ## + mode: requireTLS + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param tls.resources.limits Init container generate-tls-certs resource limits + ## @param tls.resources.requests Init container generate-tls-certs resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} +## @param hostAliases Add deployment host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param replicaSetName Name of the replica set (only when `architecture=replicaset`) +## Ignored when mongodb.architecture=standalone +## +replicaSetName: rs0 +## @param replicaSetHostnames Enable DNS hostnames in the replicaset config (only when `architecture=replicaset`) +## Ignored when mongodb.architecture=standalone +## Ignored when externalAccess.enabled=true +## +replicaSetHostnames: true +## @param enableIPv6 Switch to enable/disable IPv6 on MongoDB(®) +## ref: 
https://github.com/drycc/containers/tree/main/drycc/mongodb#enablingdisabling-ipv6 +## +enableIPv6: false +## @param directoryPerDB Switch to enable/disable DirectoryPerDB on MongoDB(®) +## ref: https://github.com/drycc/containers/tree/main/drycc/mongodb#enablingdisabling-directoryperdb +## +directoryPerDB: false +## MongoDB(®) System Log configuration +## ref: https://github.com/drycc/containers/tree/main/drycc/mongodb#configuring-system-log-verbosity-level +## @param systemLogVerbosity MongoDB(®) system log verbosity level +## @param disableSystemLog Switch to enable/disable MongoDB(®) system log +## +systemLogVerbosity: 0 +disableSystemLog: false +## @param disableJavascript Switch to enable/disable MongoDB(®) server-side JavaScript execution +## ref: https://docs.mongodb.com/manual/core/server-side-javascript/ +## +disableJavascript: false +## @param enableJournal Switch to enable/disable MongoDB(®) Journaling +## ref: https://docs.mongodb.com/manual/reference/configuration-options/#mongodb-setting-storage.journal.enabled +## +enableJournal: true +## @param configuration MongoDB(®) configuration file to be used for Primary and Secondary nodes +## For documentation of all options, see: http://docs.mongodb.org/manual/reference/configuration-options/ +## Example: +## configuration: |- +## # where and how to store data. 
+## storage: +## dbPath: /drycc/mongodb/data/db +## journal: +## enabled: true +## directoryPerDB: false +## # where to write logging data +## systemLog: +## destination: file +## quiet: false +## logAppend: true +## logRotate: reopen +## path: /opt/drycc/mongodb/logs/mongodb.log +## verbosity: 0 +## # network interfaces +## net: +## port: 27017 +## unixDomainSocket: +## enabled: true +## pathPrefix: /opt/drycc/mongodb/tmp +## ipv6: false +## bindIpAll: true +## # replica set options +## #replication: +## #replSetName: replicaset +## #enableMajorityReadConcern: true +## # process management optionsT +## processManagement: +## fork: false +## pidFilePath: /opt/drycc/mongodb/tmp/mongodb.pid +## # set parameter options +## setParameter: +## enableLocalhostAuthBypass: true +## # security options +## security: +## authorization: disabled +## #keyFile: /opt/drycc/mongodb/conf/keyfile +## +configuration: "" +## @section replicaSetConfigurationSettings settings applied during runtime (not via configuration file) +## If enabled, these are applied by a script which is called within setup.sh +## for documentation see https://docs.mongodb.com/manual/reference/replica-configuration/#replica-set-configuration-fields +## @param replicaSetConfigurationSettings.enabled Enable MongoDB(®) Switch to enable/disable configuring MongoDB(®) run time rs.conf settings +## @param replicaSetConfigurationSettings.configuration run-time rs.conf settings +## +replicaSetConfigurationSettings: + enabled: false + configuration: {} +## chainingAllowed : false +## heartbeatTimeoutSecs : 10 +## heartbeatIntervalMillis : 2000 +## electionTimeoutMillis : 10000 +## catchUpTimeoutMillis : 30000 +## @param existingConfigmap Name of existing ConfigMap with MongoDB(®) configuration for Primary and Secondary nodes +## NOTE: When it's set the arbiter.configuration parameter is ignored +## +existingConfigmap: "" +## @param initdbScripts Dictionary of initdb scripts +## Specify dictionary of scripts to be run at 
first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +## +initdbScripts: {} +## @param initdbScriptsConfigMap Existing ConfigMap with custom initdb scripts +## +initdbScriptsConfigMap: "" +## Command and args for running the container (set to default if not set). Use array form +## @param command Override default container command (useful when using custom images) +## @param args Override default container args (useful when using custom images) +## +command: [] +args: [] +## @param extraFlags MongoDB(®) additional command line flags +## Example: +## extraFlags: +## - "--wiredTigerCacheSizeGB=2" +## +extraFlags: [] +## @param extraEnvVars Extra environment variables to add to MongoDB(®) pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @section MongoDB(®) statefulset parameters +## + +## @param annotations Additional labels to be added to the MongoDB(®) statefulset. Evaluated as a template +## +annotations: {} +## @param labels Annotations to be added to the MongoDB(®) statefulset. Evaluated as a template +## +labels: {} +## @param replicaCount Number of MongoDB(®) nodes (only when `architecture=replicaset`) +## Ignored when mongodb.architecture=standalone +## +replicaCount: 3 +## @param updateStrategy.type Strategy to use to replace existing MongoDB(®) pods. When architecture=standalone and useStatefulSet=false, +## this parameter will be applied on a deployment object. 
In other case it will be applied on a statefulset object +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +## Example: +## updateStrategy: +## type: RollingUpdate +## rollingUpdate: +## maxSurge: 25% +## maxUnavailable: 25% +## +updateStrategy: + type: RollingUpdate +## @param podManagementPolicy Pod management policy for MongoDB(®) +## Should be initialized one by one when building the replicaset for the first time +## +podManagementPolicy: OrderedReady +## @param podAffinityPreset MongoDB(®) Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset MongoDB(®) Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type MongoDB(®) Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key MongoDB(®) Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values MongoDB(®) Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity MongoDB(®) Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector MongoDB(®) Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations MongoDB(®) Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints MongoDB(®) Spread Constraints for Pods +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] +## @param lifecycleHooks LifecycleHook for the MongoDB(®) container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param terminationGracePeriodSeconds MongoDB(®) Termination Grace Period +## +terminationGracePeriodSeconds: "" +## @param podLabels MongoDB(®) pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations MongoDB(®) Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param priorityClassName Name of the existing priority class to be used by MongoDB(®) pod(s) +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param runtimeClassName Name of the runtime class to be used by MongoDB(®) pod(s) +## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ +## +runtimeClassName: "" +## MongoDB(®) pods' Security Context. 
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable MongoDB(®) pod(s)' Security Context +## @param podSecurityContext.fsGroup Group ID for the volumes of the MongoDB(®) pod(s) +## @param podSecurityContext.sysctls sysctl settings of the MongoDB(®) pod(s)' +## +podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] +## MongoDB(®) containers' Security Context (main and metrics container). +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable MongoDB(®) container(s)' Security Context +## @param containerSecurityContext.runAsUser User ID for the MongoDB(®) container +## @param containerSecurityContext.runAsNonRoot Set MongoDB(®) container's Security Context runAsNonRoot +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true +## MongoDB(®) containers' resource requests and limits. +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for MongoDB(®) containers +## @param resources.requests The requested resources for MongoDB(®) containers +## +resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} +## @param containerPorts.mongodb MongoDB(®) container port +containerPorts: + mongodb: 27017 +## MongoDB(®) pods' liveness probe. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 +## MongoDB(®) pods' readiness probe. Evaluated as a template. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +## Slow starting containers can be protected through startup probes +## Startup probes are available in Kubernetes version 1.16 and above +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 30 +## @param customLivenessProbe Override default liveness probe for MongoDB(®) containers +## Ignored when livenessProbe.enabled=true +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe for MongoDB(®) containers +## Ignored when readinessProbe.enabled=true +## +customReadinessProbe: {} +## @param customStartupProbe Override default startup probe for MongoDB(®) containers +## Ignored when startupProbe.enabled=true 
+## +customStartupProbe: {} +## @param initContainers Add additional init containers for the hidden node pod(s) +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add additional sidecar containers for the MongoDB(®) pod(s) +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## This is an optional 'mongo-labeler' sidecar container that tracks replica-set for the primary mongodb pod +## and labels it dynamically with ' primary: "true" ' in order for an extra-deployed service to always expose +## and attach to the primary pod, this needs to be uncommented along with the suggested 'extraDeploy' example +## and the suggested rbac example for the pod to be allowed adding labels to mongo replica pods +## search 'mongo-labeler' through this file to find the sections that needs to be uncommented to make it work +## +## - name: mongo-labeler +## image: korenlev/k8s-mongo-labeler-sidecar +## imagePullPolicy: Always +## env: +## - name: LABEL_SELECTOR +## value: "app.kubernetes.io/component=mongodb,app.kubernetes.io/instance=mongodb,app.kubernetes.io/name=mongodb" +## - name: NAMESPACE +## value: "the-mongodb-namespace" +## - name: DEBUG +## value: "true" +## +sidecars: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MongoDB(®) container(s) +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes to the MongoDB(®) statefulset +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] +## MongoDB(®) Pod Disruption Budget configuration +## ref: 
https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation for MongoDB(®) pod(s) + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of MongoDB(®) pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of MongoDB(®) pods that may be made unavailable after the eviction + ## + maxUnavailable: "" + +## @section Traffic exposure parameters +## + +## Service parameters +## +service: + ## @param service.nameOverride MongoDB(®) service name + ## + nameOverride: "" + ## @param service.type Kubernetes Service type (only for standalone architecture) + ## + type: ClusterIP + ## @param service.portName MongoDB(®) service port name (only for standalone architecture) + ## + portName: mongodb + ## @param service.ports.mongodb MongoDB(®) service port. + ## + ports: + mongodb: 27017 + ## @param service.nodePorts.mongodb Port to bind to for NodePort and LoadBalancer service types (only for standalone architecture) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + mongodb: "" + ## @param service.clusterIP MongoDB(®) service cluster IP (only for standalone architecture) + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.externalIPs Specify the externalIP value ClusterIP service type (only for standalone architecture) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param service.loadBalancerIP loadBalancerIP for MongoDB(®) Service (only for standalone architecture) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer (only for standalone architecture) + ## ref: 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.annotations Provide any additional annotations that may be required + ## + annotations: {} + ## @param service.externalTrafficPolicy service external traffic policy (only for standalone architecture) + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Local + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## External Access to MongoDB(®) nodes configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to MongoDB(®) nodes (only for replicaset architecture) + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs by querying the K8s API + ## + enabled: false + ## Drycc Kubectl image + ## ref: https://hub.docker.com/r/drycc/kubectl/tags/ + ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry + ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository + ## @param externalAccess.autoDiscovery.image.tag 
Init container auto-discovery image tag (immutable tags are recommended) + ## @param externalAccess.autoDiscovery.image.digest Init container auto-discovery image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy + ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets + ## + image: + registry: docker.io + repository: drycc/kubectl + tag: 1.25.2-debian-11-r2 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param externalAccess.autoDiscovery.resources.limits Init container auto-discovery resource limits + ## @param externalAccess.autoDiscovery.resources.requests Init container auto-discovery resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## Parameters to configure K8s service(s) used to externally access MongoDB(®) + ## A new service per broker will be created + ## + service: + ## @param externalAccess.service.type Kubernetes Service type for external access. Allowed values: NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.service.portName MongoDB(®) port name used for external access when service type is LoadBalancer + ## + portName: "mongodb" + ## @param externalAccess.service.ports.mongodb MongoDB(®) port used for external access when service type is LoadBalancer + ## + ports: + mongodb: 27017 + ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for MongoDB(®) nodes + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.service.externalTrafficPolicy MongoDB(®) service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Local + ## @param externalAccess.service.nodePorts Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort + ## Example: + ## 
nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.service.domain Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort + ## If not specified, the container will try to get the kubernetes node external IP + ## e.g: + ## domain: mydomain.com + ## + domain: "" + ## @param externalAccess.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param externalAccess.service.annotations Service annotations for external access + ## + annotations: + {} + ## @param externalAccess.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param externalAccess.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## External Access to MongoDB(®) Hidden nodes configuration + ## + hidden: + ## @param externalAccess.hidden.enabled Enable Kubernetes external cluster access to MongoDB(®) hidden nodes + ## + enabled: false + ## Parameters to configure K8s service(s) used to externally access MongoDB(®) + ## A new service per broker will be created + ## + service: + ## @param externalAccess.hidden.service.type Kubernetes Service type for external access. 
Allowed values: NodePort or LoadBalancer + ## + type: LoadBalancer + ## @param externalAccess.hidden.service.portName MongoDB(®) port name used for external access when service type is LoadBalancer + ## + portName: "mongodb" + ## @param externalAccess.hidden.service.ports.mongodb MongoDB(®) port used for external access when service type is LoadBalancer + ## + ports: + mongodb: 27017 + ## @param externalAccess.hidden.service.loadBalancerIPs Array of load balancer IPs for MongoDB(®) nodes + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.hidden.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.hidden.service.externalTrafficPolicy MongoDB(®) service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Local + ## @param externalAccess.hidden.service.nodePorts Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort. 
Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.hidden.service.domain Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort + ## If not specified, the container will try to get the kubernetes node external IP + ## e.g: + ## domain: mydomain.com + ## + domain: "" + ## @param externalAccess.hidden.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param externalAccess.hidden.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.hidden.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param externalAccess.hidden.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Persistence parameters +## + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable MongoDB(®) data persistence using PVC + ## + enabled: true + ## @param persistence.medium Provide a medium for `emptyDir` volumes. + ## Requires persistence.enabled: false + ## + medium: "" + ## @param persistence.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## Ignored when mongodb.architecture=replicaset + ## + existingClaim: "" + ## @param persistence.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. 
Leaving it empty will delete PVCs after the chart deleted + resourcePolicy: "" + ## @param persistence.storageClass PVC Storage Class for MongoDB(®) data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param persistence.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for MongoDB(®) data volume + ## + size: 8Gi + ## @param persistence.annotations PVC annotations + ## + annotations: {} + ## @param persistence.mountPath Path to mount the volume at + ## MongoDB(®) images. + ## + mountPath: /drycc/mongodb + ## @param persistence.subPath Subdirectory of the volume to mount at + ## and one PV for multiple services. + ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param persistence.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) + ## A label query over volumes to consider for binding (e.g. 
when using local volumes) + ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details + ## + selector: {} + ## @param persistence.volumeClaimTemplates.requests Custom PVC requests attributes + ## Sometime cloud providers use additional requests attributes to provision custom storage instance + ## See https://cloud.ibm.com/docs/containers?topic=containers-file_storage#file_dynamic_statefulset + ## + requests: {} + ## @param persistence.volumeClaimTemplates.dataSource Add dataSource to the VolumeClaimTemplate + ## + dataSource: {} + +## @section RBAC parameters +## + +## ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for MongoDB(®) pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the mongodb.fullname template + ## + name: "" + ## @param serviceAccount.annotations Additional Service Account annotations + ## + annotations: {} + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## binding MongoDB(®) ServiceAccount to a role + ## that allows MongoDB(®) pods querying the K8s API + ## this needs to be set to 'true' to enable the mongo-labeler sidecar primary mongodb discovery + ## + create: false + ## @param rbac.rules Custom rules to create following the role specification + ## The example below needs to be uncommented to use the 'mongo-labeler' sidecar for dynamic discovery of the primary mongodb pod: 
+ ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## - watch + ## - update + ## + rules: [] +## PodSecurityPolicy configuration +## Be sure to also set rbac.create to true, otherwise Role and RoleBinding won't be created. +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.allowPrivilegeEscalation Enable privilege escalation + ## Either use predefined policy with some adjustments or use `podSecurityPolicy.spec` + ## + allowPrivilegeEscalation: false + ## @param podSecurityPolicy.privileged Allow privileged + ## + privileged: false + ## @param podSecurityPolicy.spec Specify the full spec to use for Pod Security Policy + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## Defining a spec ignores the above values. 
+ ## + spec: {} + ## Example: + ## allowPrivilegeEscalation: false + ## fsGroup: + ## rule: 'MustRunAs' + ## ranges: + ## - min: 1001 + ## max: 1001 + ## hostIPC: false + ## hostNetwork: false + ## hostPID: false + ## privileged: false + ## readOnlyRootFilesystem: false + ## requiredDropCapabilities: + ## - ALL + ## runAsUser: + ## rule: 'MustRunAs' + ## ranges: + ## - min: 1001 + ## max: 1001 + ## seLinux: + ## rule: 'RunAsAny' + ## supplementalGroups: + ## rule: 'MustRunAs' + ## ranges: + ## - min: 1001 + ## max: 1001 + ## volumes: + ## - 'configMap' + ## - 'secret' + ## - 'emptyDir' + ## - 'persistentVolumeClaim' + ## + +## @section Volume Permissions parameters +## +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: drycc/drycc-shell + tag: 11-debian-11-r37 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false and shmVolume.chmod.enabled=false + ## @param volumePermissions.securityContext.runAsUser User ID for the volumePermissions container + ## + securityContext: + runAsUser: 0 + +## @section Arbiter parameters +## + +arbiter: + ## @param arbiter.enabled Enable deploying the arbiter + ## https://docs.mongodb.com/manual/tutorial/add-replica-set-arbiter/ + ## + enabled: false + ## @param arbiter.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param arbiter.configuration Arbiter configuration file to be used + ## http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configuration: "" + ## @param arbiter.existingConfigmap Name of existing ConfigMap with Arbiter configuration + ## NOTE: When it's set the arbiter.configuration parameter is ignored + ## + existingConfigmap: "" + ## Command and args for running the 
container (set to default if not set). Use array form
+ ## @param arbiter.command Override default container command (useful when using custom images)
+ ## @param arbiter.args Override default container args (useful when using custom images)
+ ##
+ command: []
+ args: []
+ ## @param arbiter.extraFlags Arbiter additional command line flags
+ ## Example:
+ ## extraFlags:
+ ## - "--wiredTigerCacheSizeGB=2"
+ ##
+ extraFlags: []
+ ## @param arbiter.extraEnvVars Extra environment variables to add to Arbiter pods
+ ## E.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: BAR
+ ##
+ extraEnvVars: []
+ ## @param arbiter.extraEnvVarsCM Name of existing ConfigMap containing extra env vars
+ ##
+ extraEnvVarsCM: ""
+ ## @param arbiter.extraEnvVarsSecret Name of existing Secret containing extra env vars (in case of sensitive data)
+ ##
+ extraEnvVarsSecret: ""
+ ## @param arbiter.annotations Additional annotations to be added to the Arbiter statefulset
+ ##
+ annotations: {}
+ ## @param arbiter.labels Additional labels to be added to the Arbiter statefulset
+ ##
+ labels: {}
+ ## @param arbiter.topologySpreadConstraints MongoDB(®) Spread Constraints for arbiter Pods
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ##
+ topologySpreadConstraints: []
+ ## @param arbiter.lifecycleHooks LifecycleHook for the Arbiter container to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param arbiter.terminationGracePeriodSeconds Arbiter Termination Grace Period
+ ##
+ terminationGracePeriodSeconds: ""
+ ## @param arbiter.updateStrategy.type Strategy that will be employed to update Pods in the StatefulSet
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+ ## updateStrategy:
+ ## type: RollingUpdate
+ ## rollingUpdate:
+ ## maxSurge: 25%
+ ## maxUnavailable: 25%
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ ## @param arbiter.podManagementPolicy Pod management policy for 
MongoDB(®) + ## Should be initialized one by one when building the replicaset for the first time + ## + podManagementPolicy: OrderedReady + ## @param arbiter.schedulerName Name of the scheduler (other than default) to dispatch pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param arbiter.podAffinityPreset Arbiter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param arbiter.podAntiAffinityPreset Arbiter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param arbiter.nodeAffinityPreset.type Arbiter Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param arbiter.nodeAffinityPreset.key Arbiter Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param arbiter.nodeAffinityPreset.values Arbiter Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param arbiter.affinity Arbiter Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: arbiter.podAffinityPreset, arbiter.podAntiAffinityPreset, and arbiter.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param arbiter.nodeSelector Arbiter Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param arbiter.tolerations Arbiter Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param arbiter.podLabels Arbiter pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param arbiter.podAnnotations Arbiter Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param arbiter.priorityClassName Name of the existing priority class to be used by Arbiter pod(s) + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param arbiter.runtimeClassName Name of the runtime class to be used by Arbiter pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## MongoDB(®) Arbiter pods' Security Context. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param arbiter.podSecurityContext.enabled Enable Arbiter pod(s)' Security Context + ## @param arbiter.podSecurityContext.fsGroup Group ID for the volumes of the Arbiter pod(s) + ## @param arbiter.podSecurityContext.sysctls sysctl settings of the Arbiter pod(s)' + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + ## MongoDB(®) Arbiter containers' Security Context (only main container). + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param arbiter.containerSecurityContext.enabled Enable Arbiter container(s)' Security Context + ## @param arbiter.containerSecurityContext.runAsUser User ID for the Arbiter container + ## @param arbiter.containerSecurityContext.runAsNonRoot Set Arbiter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## MongoDB(®) Arbiter containers' resource requests and limits. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param arbiter.resources.limits The resources limits for Arbiter containers + ## @param arbiter.resources.requests The requested resources for Arbiter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## @param arbiter.containerPorts.mongodb MongoDB(®) arbiter container port + ## + containerPorts: + mongodb: 27017 + ## MongoDB(®) Arbiter pods' liveness probe. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param arbiter.livenessProbe.enabled Enable livenessProbe + ## @param arbiter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param arbiter.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param arbiter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param arbiter.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param arbiter.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + ## MongoDB(®) Arbiter pods' readiness probe. Evaluated as a template. 
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param arbiter.readinessProbe.enabled Enable readinessProbe + ## @param arbiter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param arbiter.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param arbiter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param arbiter.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param arbiter.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + ## MongoDB(®) Arbiter pods' startup probe. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param arbiter.startupProbe.enabled Enable startupProbe + ## @param arbiter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param arbiter.startupProbe.periodSeconds Period seconds for startupProbe + ## @param arbiter.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param arbiter.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param arbiter.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 30 + ## @param arbiter.customLivenessProbe Override default liveness probe for Arbiter containers + ## Ignored when arbiter.livenessProbe.enabled=true + ## + customLivenessProbe: {} + ## @param arbiter.customReadinessProbe Override default readiness probe for Arbiter containers + ## Ignored when arbiter.readinessProbe.enabled=true + ## + customReadinessProbe: {} + ## @param arbiter.customStartupProbe Override default startup probe for Arbiter containers + 
## Ignored when arbiter.startupProbe.enabled=true + ## + customStartupProbe: {} + ## @param arbiter.initContainers Add additional init containers for the Arbiter pod(s) + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param arbiter.sidecars Add additional sidecar containers for the Arbiter pod(s) + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param arbiter.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Arbiter container(s) + ## Examples: + ## extraVolumeMounts: + ## - name: extras + ## mountPath: /usr/share/extras + ## readOnly: true + ## + extraVolumeMounts: [] + ## @param arbiter.extraVolumes Optionally specify extra list of additional volumes to the Arbiter statefulset + ## extraVolumes: + ## - name: extras + ## emptyDir: {} + ## + extraVolumes: [] + ## MongoDB(®) Arbiter Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param arbiter.pdb.create Enable/disable a Pod Disruption Budget creation for Arbiter pod(s) + ## + create: false + ## @param arbiter.pdb.minAvailable Minimum number/percentage of Arbiter pods that should remain scheduled + ## + minAvailable: 1 + ## @param arbiter.pdb.maxUnavailable Maximum number/percentage of Arbiter pods that may be made unavailable + ## + maxUnavailable: "" + ## MongoDB(®) Arbiter service parameters + ## + service: + ## @param arbiter.service.nameOverride The arbiter service name + ## + nameOverride: "" + ## @param arbiter.service.ports.mongodb MongoDB(®) service port + ## + ports: + mongodb: 27017 + ## @param arbiter.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + 
extraPorts: []
+ ## @param arbiter.service.annotations Provide any additional annotations that may be required
+ ##
+ annotations: {}
+
+## @section Hidden Node parameters
+##
+
+hidden:
+ ## @param hidden.enabled Enable deploying the hidden nodes
+ ## https://docs.mongodb.com/manual/tutorial/configure-a-hidden-replica-set-member/
+ ##
+ enabled: false
+ ## @param hidden.hostAliases Add deployment host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## @param hidden.configuration Hidden node configuration file to be used
+ ## http://docs.mongodb.org/manual/reference/configuration-options/
+ ##
+ configuration: ""
+ ## @param hidden.existingConfigmap Name of existing ConfigMap with Hidden node configuration
+ ## NOTE: When it's set the hidden.configuration parameter is ignored
+ ##
+ existingConfigmap: ""
+ ## Command and args for running the container (set to default if not set). Use array form
+ ## @param hidden.command Override default container command (useful when using custom images)
+ ## @param hidden.args Override default container args (useful when using custom images)
+ ##
+ command: []
+ args: []
+ ## @param hidden.extraFlags Hidden node additional command line flags
+ ## Example:
+ ## extraFlags:
+ ## - "--wiredTigerCacheSizeGB=2"
+ ##
+ extraFlags: []
+ ## @param hidden.extraEnvVars Extra environment variables to add to Hidden node pods
+ ## E.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: BAR
+ ##
+ extraEnvVars: []
+ ## @param hidden.extraEnvVarsCM Name of existing ConfigMap containing extra env vars
+ ##
+ extraEnvVarsCM: ""
+ ## @param hidden.extraEnvVarsSecret Name of existing Secret containing extra env vars (in case of sensitive data)
+ ##
+ extraEnvVarsSecret: ""
+ ## @param hidden.annotations Additional annotations to be added to the hidden node statefulset
+ ##
+ annotations: {}
+ ## @param hidden.labels Additional labels to be added to the hidden node 
statefulset + ## + labels: {} + ## @param hidden.topologySpreadConstraints MongoDB(®) Spread Constraints for hidden Pods + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + ## @param hidden.lifecycleHooks LifecycleHook for the Hidden container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param hidden.replicaCount Number of hidden nodes (only when `architecture=replicaset`) + ## Ignored when mongodb.architecture=standalone + ## + replicaCount: 1 + ## @param hidden.terminationGracePeriodSeconds Hidden Termination Grace Period + ## + terminationGracePeriodSeconds: "" + ## @param hidden.updateStrategy.type Strategy that will be employed to update Pods in the StatefulSet + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: RollingUpdate + ## @param hidden.podManagementPolicy Pod management policy for hidden node + ## + podManagementPolicy: OrderedReady + ## @param hidden.schedulerName Name of the scheduler (other than default) to dispatch pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param hidden.podAffinityPreset Hidden node Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param hidden.podAntiAffinityPreset Hidden node Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## @param hidden.nodeAffinityPreset.type Hidden Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param hidden.nodeAffinityPreset.key Hidden Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param hidden.nodeAffinityPreset.values Hidden Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param hidden.affinity Hidden node Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param hidden.nodeSelector Hidden node Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param hidden.tolerations Hidden node Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param hidden.podLabels Hidden node pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param hidden.podAnnotations Hidden node Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param hidden.priorityClassName Name of the existing priority class to be used by hidden node pod(s) + ## ref: 
https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param hidden.runtimeClassName Name of the runtime class to be used by hidden node pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## MongoDB(®) Hidden pods' Security Context. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param hidden.podSecurityContext.enabled Enable Hidden pod(s)' Security Context + ## @param hidden.podSecurityContext.fsGroup Group ID for the volumes of the Hidden pod(s) + ## @param hidden.podSecurityContext.sysctls sysctl settings of the Hidden pod(s)' + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + ## MongoDB(®) Hidden containers' Security Context (only main container). + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param hidden.containerSecurityContext.enabled Enable Hidden container(s)' Security Context + ## @param hidden.containerSecurityContext.runAsUser User ID for the Hidden container + ## @param hidden.containerSecurityContext.runAsNonRoot Set Hidden containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## MongoDB(®) Hidden containers' resource requests and limits. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param hidden.resources.limits The resources limits for hidden node containers
+ ## @param hidden.resources.requests The requested resources for hidden node containers
+ ##
+ resources:
+ ## Example:
+ ## limits:
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+ limits: {}
+ ## Examples:
+ ## requests:
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+ requests: {}
+ ## @param hidden.containerPorts.mongodb MongoDB(®) hidden container port
+ containerPorts:
+ mongodb: 27017
+ ## MongoDB(®) Hidden pods' liveness probe. Evaluated as a template.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+ ## @param hidden.livenessProbe.enabled Enable livenessProbe
+ ## @param hidden.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param hidden.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param hidden.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param hidden.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param hidden.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 20
+ timeoutSeconds: 10
+ failureThreshold: 6
+ successThreshold: 1
+ ## MongoDB(®) Hidden pods' readiness probe. Evaluated as a template. 
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param hidden.readinessProbe.enabled Enable readinessProbe + ## @param hidden.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param hidden.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param hidden.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param hidden.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param hidden.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + ## Slow starting containers can be protected through startup probes + ## Startup probes are available in Kubernetes version 1.16 and above + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes + ## @param hidden.startupProbe.enabled Enable startupProbe + ## @param hidden.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param hidden.startupProbe.periodSeconds Period seconds for startupProbe + ## @param hidden.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param hidden.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param hidden.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 30 + ## @param hidden.customLivenessProbe Override default liveness probe for hidden node containers + ## Ignored when hidden.livenessProbe.enabled=true + ## + customLivenessProbe: {} + ## @param hidden.customReadinessProbe Override default readiness probe for hidden node containers + ## Ignored when hidden.readinessProbe.enabled=true + ## + 
customReadinessProbe: {} + ## @param hidden.customStartupProbe Override default startup probe for MongoDB(®) containers + ## Ignored when hidden.startupProbe.enabled=true + ## + customStartupProbe: {} + ## @param hidden.initContainers Add init containers to the MongoDB(®) Hidden pods. + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param hidden.sidecars Add additional sidecar containers for the hidden node pod(s) + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param hidden.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the hidden node container(s) + ## Examples: + ## extraVolumeMounts: + ## - name: extras + ## mountPath: /usr/share/extras + ## readOnly: true + ## + extraVolumeMounts: [] + ## @param hidden.extraVolumes Optionally specify extra list of additional volumes to the hidden node statefulset + ## extraVolumes: + ## - name: extras + ## emptyDir: {} + ## + extraVolumes: [] + ## MongoDB(®) Hidden Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param hidden.pdb.create Enable/disable a Pod Disruption Budget creation for hidden node pod(s) + ## + create: false + ## @param hidden.pdb.minAvailable Minimum number/percentage of hidden node pods that should remain scheduled + ## + minAvailable: 1 + ## @param hidden.pdb.maxUnavailable Maximum number/percentage of hidden node pods that may be made unavailable + ## + maxUnavailable: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param hidden.persistence.enabled Enable hidden node data persistence using PVC + 
## + enabled: true + ## @param hidden.persistence.medium Provide a medium for `emptyDir` volumes. + ## Requires hidden.persistence.enabled: false + ## + medium: "" + ## @param hidden.persistence.storageClass PVC Storage Class for hidden node data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param hidden.persistence.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param hidden.persistence.size PVC Storage Request for hidden node data volume + ## + size: 8Gi + ## @param hidden.persistence.annotations PVC annotations + ## + annotations: {} + ## @param hidden.persistence.mountPath The path the volume will be mounted at, useful when using different MongoDB(®) images. + ## + mountPath: /drycc/mongodb + ## @param hidden.persistence.subPath The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param hidden.persistence.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. 
when using local volumes)
+ ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details
+ ##
+ selector: {}
+ ## @param hidden.persistence.volumeClaimTemplates.requests Custom PVC requests attributes
+ ## Sometimes cloud providers use additional requests attributes to provision custom storage instance
+ ## See https://cloud.ibm.com/docs/containers?topic=containers-file_storage#file_dynamic_statefulset
+ ##
+ requests: {}
+ ## @param hidden.persistence.volumeClaimTemplates.dataSource Set volumeClaimTemplate dataSource
+ ##
+ dataSource: {}
+ service:
+ ## @param hidden.service.portName MongoDB(®) service port name
+ ##
+ portName: "mongodb"
+ ## @param hidden.service.ports.mongodb MongoDB(®) service port
+ ##
+ ports:
+ mongodb: 27017
+ ## @param hidden.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
+ ##
+ extraPorts: []
+ ## @param hidden.service.annotations Provide any additional annotations that may be required
+ ##
+ annotations: {}
+
+## @section Metrics parameters
+##
+
+metrics:
+ ## @param metrics.enabled Enable using a sidecar Prometheus exporter
+ ##
+ enabled: true
+ ## Drycc MongoDB(®) Prometheus Exporter image
+ ## ref: https://hub.docker.com/r/drycc/mongodb-exporter/tags/
+ ## @param metrics.image.registry MongoDB(®) Prometheus exporter image registry
+ ## @param metrics.image.repository MongoDB(®) Prometheus exporter image repository
+ ## @param metrics.image.tag MongoDB(®) Prometheus exporter image tag (immutable tags are recommended)
+ ## @param metrics.image.digest MongoDB(®) image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy MongoDB(®) Prometheus exporter image pull policy + ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: registry.drycc.cc + repository: drycc-addons/mongodb-exporter + tag: 0 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param metrics.username String with username for the metrics exporter + ## If undefined the root user will be used for the metrics exporter + username: "user_exporter" + ## @param metrics.password String with password for the metrics exporter + ## If undefined but metrics.username is defined, a random password will be generated + password: "" + ## @param metrics.extraFlags String with extra flags to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + ## + extraFlags: "" + ## Command and args for running the container (set to default if not set). Use array form + ## @param metrics.command Override default container command (useful when using custom images) + ## @param metrics.args Override default container args (useful when using custom images) + ## + command: [] + args: [] + ## Metrics exporter container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param metrics.resources.limits The resources limits for Prometheus exporter containers + ## @param metrics.resources.requests The requested resources for Prometheus exporter containers + ## + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + ## @param metrics.containerPort Port of the Prometheus metrics container + ## + containerPort: 9216 + ## Prometheus Exporter service configuration + ## + service: + ## @param metrics.service.annotations [object] Annotations for Prometheus Exporter pods. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + prometheus.io/path: "/metrics" + ## @param metrics.service.type Type of the Prometheus metrics service + ## + type: ClusterIP + ## @param metrics.service.ports.metrics Port of the Prometheus metrics service + ## + ports: + metrics: 9216 + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## Metrics exporter liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## Metrics exporter readiness probe + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## Slow starting containers can be protected through startup probes + ## Startup probes are available in Kubernetes version 1.16 and above + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes + ## @param metrics.startupProbe.enabled Enable startupProbe + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 30 + ## @param metrics.customLivenessProbe Override default liveness probe for MongoDB(®) containers + ## Ignored when livenessProbe.enabled=true + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Override default readiness probe for MongoDB(®) containers + ## Ignored when readinessProbe.enabled=true + 
## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Override default startup probe for MongoDB(®) containers + ## Ignored when startupProbe.enabled=true + ## + customStartupProbe: {} + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.labels Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/drycc/charts/tree/master/drycc/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.namespace Namespace where prometheusRules resource should be created + ## + namespace: "" + ## @param metrics.prometheusRule.rules Rules to be created, check values for an example + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + ## + ## This is an example of a rule, you should add the below code block under the "rules" param, removing the brackets + ## rules: + ## - alert: HighRequestLatency + ## expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 + ## for: 10m + ## labels: + ## severity: page + ## annotations: + ## summary: High request latency + ## + rules: [] + +## Mongodb Nework Policy configuration +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param networkPolicy.allowExternal The Policy model to apply. + ## When set to false, only pods with the correct + ## client label will have network access to the port Mongodb is listening + ## on. When true, Mongodb will accept connections from any source + ## (with the correct destination port). 
+ ## + allowCurrentNamespace: true + allowNamespaces: [] diff --git a/addons/mongodb/8.0/meta.yaml b/addons/mongodb/8.0/meta.yaml new file mode 100644 index 00000000..c61c1b3e --- /dev/null +++ b/addons/mongodb/8.0/meta.yaml @@ -0,0 +1,24 @@ +name: mongodb-8.0 +version: 8.0 +id: 7a599bfe-42fe-45f0-a7e5-d706cceeb75a +description: "mongodb-8.0" +displayName: "mongodb-8.0" +metadata: + displayName: "mongodb-8.0" + provider: + name: drycc + supportURL: https://www.mongodb.com/docs/manual/ + documentationURL: https://www.mongodb.com/docs/manual/ +tags: mongodb +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: + - name: "service.type" + required: false + description: "service type config for values.yaml" + - name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" +archive: false diff --git a/addons/mongodb/8.0/plans/standard-16c64g400/bind.yaml b/addons/mongodb/8.0/plans/standard-16c64g400/bind.yaml new file mode 100644 index 00000000..a8ce0395 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-16c64g400/bind.yaml @@ -0,0 +1,36 @@ +{{- $fullName := include "mongodb.fullname" . 
}} +{{- $replicaCount := .Values.replicaCount | int }} +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- range $i, $e := until $replicaCount }} + - name: {{ printf "EXTRANET_HOST_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "HOSTNAME_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .spec.clusterIP }' +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.mongodb-root-password }' + - name: USERNAME + value: 'root' + - name: PORT + value: 27017 \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-16c64g400/instance-schema.json b/addons/mongodb/8.0/plans/standard-16c64g400/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-16c64g400/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-16c64g400/meta.yaml b/addons/mongodb/8.0/plans/standard-16c64g400/meta.yaml new file mode 100644 index 00000000..0be71694 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-16c64g400/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c64g400" +id: 6f7a8b9c-0d1e-2f3a-4b5c-6d7e8f9a0b1c +description: "mongodb 
standard-16c64g400 plan: Disk 400Gi ,vCPUs 16 , RAM 64G " +displayName: "standard-16c64g400" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/mongodb/8.0/plans/standard-16c64g400/values.yaml b/addons/mongodb/8.0/plans/standard-16c64g400/values.yaml new file mode 100644 index 00000000..400e61dc --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-16c64g400/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-mongo-cluster-standard-16c64g400 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 16000m + memory: 64Gi + requests: + cpu: 100m + memory: 1Gi + +persistence: + size: 400Gi diff --git a/addons/mongodb/8.0/plans/standard-1c2g10/bind.yaml b/addons/mongodb/8.0/plans/standard-1c2g10/bind.yaml new file mode 100644 index 00000000..a8ce0395 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-1c2g10/bind.yaml @@ -0,0 +1,36 @@ +{{- $fullName := include "mongodb.fullname" . 
}} +{{- $replicaCount := .Values.replicaCount | int }} +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- range $i, $e := until $replicaCount }} + - name: {{ printf "EXTRANET_HOST_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "HOSTNAME_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .spec.clusterIP }' +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.mongodb-root-password }' + - name: USERNAME + value: 'root' + - name: PORT + value: 27017 \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-1c2g10/instance-schema.json b/addons/mongodb/8.0/plans/standard-1c2g10/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-1c2g10/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-1c2g10/meta.yaml b/addons/mongodb/8.0/plans/standard-1c2g10/meta.yaml new file mode 100644 index 00000000..7f2ea8f7 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-1c2g10/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c2g10" +id: 1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d +description: "mongodb standard-1c2g10 plan: Disk 10Gi 
,vCPUs 1 , RAM 2G " +displayName: "standard-1c2g10" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/mongodb/8.0/plans/standard-1c2g10/values.yaml b/addons/mongodb/8.0/plans/standard-1c2g10/values.yaml new file mode 100644 index 00000000..2e026ef2 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-1c2g10/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-mongo-cluster-standard-1c2g10 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 100m + memory: 1Gi + +persistence: + size: 10Gi diff --git a/addons/mongodb/8.0/plans/standard-2c4g20/bind.yaml b/addons/mongodb/8.0/plans/standard-2c4g20/bind.yaml new file mode 100644 index 00000000..a8ce0395 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c4g20/bind.yaml @@ -0,0 +1,36 @@ +{{- $fullName := include "mongodb.fullname" . 
}} +{{- $replicaCount := .Values.replicaCount | int }} +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- range $i, $e := until $replicaCount }} + - name: {{ printf "EXTRANET_HOST_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "HOSTNAME_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .spec.clusterIP }' +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.mongodb-root-password }' + - name: USERNAME + value: 'root' + - name: PORT + value: 27017 \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-2c4g20/instance-schema.json b/addons/mongodb/8.0/plans/standard-2c4g20/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c4g20/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-2c4g20/meta.yaml b/addons/mongodb/8.0/plans/standard-2c4g20/meta.yaml new file mode 100644 index 00000000..75802a82 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c4g20/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g20" +id: 2b3c4d5e-6f7a-8b9c-0d1e-2f3a4b5c6d7e +description: "mongodb standard-2c4g20 plan: Disk 20Gi 
,vCPUs 2 , RAM 4G " +displayName: "standard-2c4g20" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/mongodb/8.0/plans/standard-2c4g20/values.yaml b/addons/mongodb/8.0/plans/standard-2c4g20/values.yaml new file mode 100644 index 00000000..20dd90d0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c4g20/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-mongo-cluster-standard-2c4g20 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 100m + memory: 1Gi + +persistence: + size: 20Gi diff --git a/addons/mongodb/8.0/plans/standard-2c8g50/bind.yaml b/addons/mongodb/8.0/plans/standard-2c8g50/bind.yaml new file mode 100644 index 00000000..a8ce0395 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c8g50/bind.yaml @@ -0,0 +1,36 @@ +{{- $fullName := include "mongodb.fullname" . 
}} +{{- $replicaCount := .Values.replicaCount | int }} +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- range $i, $e := until $replicaCount }} + - name: {{ printf "EXTRANET_HOST_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "HOSTNAME_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .spec.clusterIP }' +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.mongodb-root-password }' + - name: USERNAME + value: 'root' + - name: PORT + value: 27017 \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-2c8g50/instance-schema.json b/addons/mongodb/8.0/plans/standard-2c8g50/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c8g50/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-2c8g50/meta.yaml b/addons/mongodb/8.0/plans/standard-2c8g50/meta.yaml new file mode 100644 index 00000000..46a91f28 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c8g50/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c8g50" +id: 3c4d5e6f-7a8b-9c0d-1e2f-3a4b5c6d7e8f +description: "mongodb standard-2c8g50 plan: Disk 50Gi 
,vCPUs 2 , RAM 8G " +displayName: "standard-2c8g50" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/mongodb/8.0/plans/standard-2c8g50/values.yaml b/addons/mongodb/8.0/plans/standard-2c8g50/values.yaml new file mode 100644 index 00000000..0eaf31f4 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-2c8g50/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-mongo-cluster-standard-2c8g50 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 2000m + memory: 8Gi + requests: + cpu: 100m + memory: 1Gi + +persistence: + size: 50Gi diff --git a/addons/mongodb/8.0/plans/standard-32c128g800/bind.yaml b/addons/mongodb/8.0/plans/standard-32c128g800/bind.yaml new file mode 100644 index 00000000..a8ce0395 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-32c128g800/bind.yaml @@ -0,0 +1,36 @@ +{{- $fullName := include "mongodb.fullname" . 
}} +{{- $replicaCount := .Values.replicaCount | int }} +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- range $i, $e := until $replicaCount }} + - name: {{ printf "EXTRANET_HOST_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "HOSTNAME_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .spec.clusterIP }' +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.mongodb-root-password }' + - name: USERNAME + value: 'root' + - name: PORT + value: 27017 \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-32c128g800/instance-schema.json b/addons/mongodb/8.0/plans/standard-32c128g800/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-32c128g800/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-32c128g800/meta.yaml b/addons/mongodb/8.0/plans/standard-32c128g800/meta.yaml new file mode 100644 index 00000000..01c59996 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-32c128g800/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c128g800" +id: 7a8b9c0d-1e2f-3a4b-5c6d-7e8f9a0b1c2d +description: "mongodb 
standard-32c128g800 plan: Disk 800Gi ,vCPUs 32 , RAM 128G " +displayName: "standard-32c128g800" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/mongodb/8.0/plans/standard-32c128g800/values.yaml b/addons/mongodb/8.0/plans/standard-32c128g800/values.yaml new file mode 100644 index 00000000..7c95075e --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-32c128g800/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-mongo-cluster-standard-32c128g800 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 32000m + memory: 128Gi + requests: + cpu: 100m + memory: 1Gi + +persistence: + size: 800Gi diff --git a/addons/mongodb/8.0/plans/standard-4c16g100/bind.yaml b/addons/mongodb/8.0/plans/standard-4c16g100/bind.yaml new file mode 100644 index 00000000..a8ce0395 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-4c16g100/bind.yaml @@ -0,0 +1,36 @@ +{{- $fullName := include "mongodb.fullname" . 
}} +{{- $replicaCount := .Values.replicaCount | int }} +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- range $i, $e := until $replicaCount }} + - name: {{ printf "EXTRANET_HOST_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "HOSTNAME_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .spec.clusterIP }' +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.mongodb-root-password }' + - name: USERNAME + value: 'root' + - name: PORT + value: 27017 \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-4c16g100/instance-schema.json b/addons/mongodb/8.0/plans/standard-4c16g100/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-4c16g100/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-4c16g100/meta.yaml b/addons/mongodb/8.0/plans/standard-4c16g100/meta.yaml new file mode 100644 index 00000000..2de93908 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-4c16g100/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c16g100" +id: 4d5e6f7a-8b9c-0d1e-2f3a-4b5c6d7e8f9a +description: "mongodb standard-4c16g100 
plan: Disk 100Gi ,vCPUs c , RAM 16G " +displayName: "standard-4c16g100" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/mongodb/8.0/plans/standard-4c16g100/values.yaml b/addons/mongodb/8.0/plans/standard-4c16g100/values.yaml new file mode 100644 index 00000000..bd4a6497 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-4c16g100/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-mongo-cluster-standard-4c16g100 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 4000m + memory: 16Gi + requests: + cpu: 100m + memory: 1Gi + +persistence: + size: 100Gi diff --git a/addons/mongodb/8.0/plans/standard-8c32g200/bind.yaml b/addons/mongodb/8.0/plans/standard-8c32g200/bind.yaml new file mode 100644 index 00000000..a8ce0395 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-8c32g200/bind.yaml @@ -0,0 +1,36 @@ +{{- $fullName := include "mongodb.fullname" . 
}} +{{- $replicaCount := .Values.replicaCount | int }} +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- range $i, $e := until $replicaCount }} + - name: {{ printf "EXTRANET_HOST_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "HOSTNAME_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-%d" $fullName $i }} + jsonpath: '{ .spec.clusterIP }' +{{- end }} + +{{- range $i, $e := until $replicaCount }} + - name: {{ printf "DOMAIN_%d" $i }} + value: {{ printf "%s-%d" $fullName $i }}.{{$fullName}}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} +{{- end }} + - name: REPLICA_SET_NAME + value: 'rs0' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.mongodb-root-password }' + - name: USERNAME + value: 'root' + - name: PORT + value: 27017 \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-8c32g200/instance-schema.json b/addons/mongodb/8.0/plans/standard-8c32g200/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-8c32g200/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/mongodb/8.0/plans/standard-8c32g200/meta.yaml b/addons/mongodb/8.0/plans/standard-8c32g200/meta.yaml new file mode 100644 index 00000000..b5626dfb --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-8c32g200/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g200" +id: 5e6f7a8b-9c0d-1e2f-3a4b-5c6d7e8f9a0b +description: "mongodb standard-8c32g200 
plan: Disk 200Gi ,vCPUs 8 , RAM 32G " +displayName: "standard-8c32g200" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/mongodb/8.0/plans/standard-8c32g200/values.yaml b/addons/mongodb/8.0/plans/standard-8c32g200/values.yaml new file mode 100644 index 00000000..c29f09d4 --- /dev/null +++ b/addons/mongodb/8.0/plans/standard-8c32g200/values.yaml @@ -0,0 +1,23 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-mongo-cluster-standard-8c32g200 + +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + limits: + cpu: 8000m + memory: 32Gi + requests: + cpu: 100m + memory: 1Gi + +persistence: + size: 200Gi From 9d7ab98776c1be8f56062ecad9bd7692198891ea Mon Sep 17 00:00:00 2001 From: Eamon Date: Wed, 4 Feb 2026 13:48:10 +0800 Subject: [PATCH 88/93] fix(addons): fix index mongo version description (#127) --- addons/index.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/addons/index.yaml b/addons/index.yaml index cdbe36d5..2bfb50af 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -67,7 +67,6 @@ entries: mongodb: - version: 7.0 description: "MongoDB is a document database designed for ease of application development and scaling." - mongodb: - version: 8.0 description: "MongoDB is a document database designed for ease of application development and scaling." 
clickhouse: From 3703d498903aec1df14fde0a045b3c850014e7fc Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 5 Feb 2026 14:14:16 +0800 Subject: [PATCH 89/93] chore(opensearch): adjust the minimum memory to 6G --- addons/opensearch/2.10/plans/standard-2c4g32/meta.yaml | 6 ------ addons/opensearch/2.10/plans/standard-2c4g64/meta.yaml | 6 ------ .../plans/{standard-2c4g32 => standard-2c6g32}/bind.yaml | 0 .../instance-schema.json | 0 addons/opensearch/2.10/plans/standard-2c6g32/meta.yaml | 6 ++++++ .../plans/{standard-2c4g32 => standard-2c6g32}/values.yaml | 4 ++-- .../plans/{standard-2c4g64 => standard-2c6g64}/bind.yaml | 0 .../instance-schema.json | 0 addons/opensearch/2.10/plans/standard-2c6g64/meta.yaml | 6 ++++++ .../plans/{standard-2c4g64 => standard-2c6g64}/values.yaml | 4 ++-- addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml | 6 ------ addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml | 6 ------ .../plans/{standard-2c4g32 => standard-2c6g32}/bind.yaml | 0 .../instance-schema.json | 0 addons/opensearch/3.0/plans/standard-2c6g32/meta.yaml | 6 ++++++ .../plans/{standard-2c4g32 => standard-2c6g32}/values.yaml | 4 ++-- .../plans/{standard-2c4g64 => standard-2c6g64}/bind.yaml | 0 .../instance-schema.json | 0 addons/opensearch/3.0/plans/standard-2c6g64/meta.yaml | 6 ++++++ .../plans/{standard-2c4g64 => standard-2c6g64}/values.yaml | 4 ++-- 20 files changed, 32 insertions(+), 32 deletions(-) delete mode 100644 addons/opensearch/2.10/plans/standard-2c4g32/meta.yaml delete mode 100644 addons/opensearch/2.10/plans/standard-2c4g64/meta.yaml rename addons/opensearch/2.10/plans/{standard-2c4g32 => standard-2c6g32}/bind.yaml (100%) rename addons/opensearch/2.10/plans/{standard-2c4g32 => standard-2c6g32}/instance-schema.json (100%) create mode 100644 addons/opensearch/2.10/plans/standard-2c6g32/meta.yaml rename addons/opensearch/2.10/plans/{standard-2c4g32 => standard-2c6g32}/values.yaml (98%) rename addons/opensearch/2.10/plans/{standard-2c4g64 => 
standard-2c6g64}/bind.yaml (100%) rename addons/opensearch/2.10/plans/{standard-2c4g64 => standard-2c6g64}/instance-schema.json (100%) create mode 100644 addons/opensearch/2.10/plans/standard-2c6g64/meta.yaml rename addons/opensearch/2.10/plans/{standard-2c4g64 => standard-2c6g64}/values.yaml (98%) delete mode 100644 addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml delete mode 100644 addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml rename addons/opensearch/3.0/plans/{standard-2c4g32 => standard-2c6g32}/bind.yaml (100%) rename addons/opensearch/3.0/plans/{standard-2c4g32 => standard-2c6g32}/instance-schema.json (100%) create mode 100644 addons/opensearch/3.0/plans/standard-2c6g32/meta.yaml rename addons/opensearch/3.0/plans/{standard-2c4g32 => standard-2c6g32}/values.yaml (98%) rename addons/opensearch/3.0/plans/{standard-2c4g64 => standard-2c6g64}/bind.yaml (100%) rename addons/opensearch/3.0/plans/{standard-2c4g64 => standard-2c6g64}/instance-schema.json (100%) create mode 100644 addons/opensearch/3.0/plans/standard-2c6g64/meta.yaml rename addons/opensearch/3.0/plans/{standard-2c4g64 => standard-2c6g64}/values.yaml (98%) diff --git a/addons/opensearch/2.10/plans/standard-2c4g32/meta.yaml b/addons/opensearch/2.10/plans/standard-2c4g32/meta.yaml deleted file mode 100644 index 72cf6dee..00000000 --- a/addons/opensearch/2.10/plans/standard-2c4g32/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-2c4g32" -id: 278b07be-4bd3-4618-b97e-6fa6d471709d -description: "Opensearch standard-2c4g32 plan which limit resources 2 cores 4Gi memory and persistence size 32Gi." 
-displayName: "standard-2c4g32" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/opensearch/2.10/plans/standard-2c4g64/meta.yaml b/addons/opensearch/2.10/plans/standard-2c4g64/meta.yaml deleted file mode 100644 index 9eaa8ad3..00000000 --- a/addons/opensearch/2.10/plans/standard-2c4g64/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-2c4g64" -id: 79168ebf-5508-4425-a685-47a6d5ccb512 -description: "Opensearch standard-2c4g64 plan which limit resources 2 cores 4Gi memory and persistence size 64Gi." -displayName: "standard-2c4g64" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/opensearch/2.10/plans/standard-2c4g32/bind.yaml b/addons/opensearch/2.10/plans/standard-2c6g32/bind.yaml similarity index 100% rename from addons/opensearch/2.10/plans/standard-2c4g32/bind.yaml rename to addons/opensearch/2.10/plans/standard-2c6g32/bind.yaml diff --git a/addons/opensearch/2.10/plans/standard-2c4g32/instance-schema.json b/addons/opensearch/2.10/plans/standard-2c6g32/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-2c4g32/instance-schema.json rename to addons/opensearch/2.10/plans/standard-2c6g32/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-2c6g32/meta.yaml b/addons/opensearch/2.10/plans/standard-2c6g32/meta.yaml new file mode 100644 index 00000000..aee9d531 --- /dev/null +++ b/addons/opensearch/2.10/plans/standard-2c6g32/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c6g32" +id: 278b07be-4bd3-4618-b97e-6fa6d471709d +description: "Opensearch standard-2c6g32 plan which limit resources 2 cores 6Gi memory and persistence size 32Gi." 
+displayName: "standard-2c6g32" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/2.10/plans/standard-2c4g32/values.yaml b/addons/opensearch/2.10/plans/standard-2c6g32/values.yaml similarity index 98% rename from addons/opensearch/2.10/plans/standard-2c4g32/values.yaml rename to addons/opensearch/2.10/plans/standard-2c6g32/values.yaml index 3d7e226e..4e914728 100644 --- a/addons/opensearch/2.10/plans/standard-2c4g32/values.yaml +++ b/addons/opensearch/2.10/plans/standard-2c6g32/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: "hb-opensearch-standard-2c4g32" +fullnameOverride: "hb-opensearch-standard-2c6g32" ## @section Master-elegible nodes parameters master: @@ -22,7 +22,7 @@ master: resources: limits: cpu: 2 - memory: 4Gi + memory: 6Gi requests: cpu: 200m memory: 512Mi diff --git a/addons/opensearch/2.10/plans/standard-2c4g64/bind.yaml b/addons/opensearch/2.10/plans/standard-2c6g64/bind.yaml similarity index 100% rename from addons/opensearch/2.10/plans/standard-2c4g64/bind.yaml rename to addons/opensearch/2.10/plans/standard-2c6g64/bind.yaml diff --git a/addons/opensearch/2.10/plans/standard-2c4g64/instance-schema.json b/addons/opensearch/2.10/plans/standard-2c6g64/instance-schema.json similarity index 100% rename from addons/opensearch/2.10/plans/standard-2c4g64/instance-schema.json rename to addons/opensearch/2.10/plans/standard-2c6g64/instance-schema.json diff --git a/addons/opensearch/2.10/plans/standard-2c6g64/meta.yaml b/addons/opensearch/2.10/plans/standard-2c6g64/meta.yaml new file mode 100644 index 00000000..c0e1a517 --- /dev/null +++ b/addons/opensearch/2.10/plans/standard-2c6g64/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c6g64" +id: 79168ebf-5508-4425-a685-47a6d5ccb512 +description: "Opensearch standard-2c6g64 plan which limit resources 2 cores 6Gi memory and persistence size 64Gi." 
+displayName: "standard-2c6g64" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/2.10/plans/standard-2c4g64/values.yaml b/addons/opensearch/2.10/plans/standard-2c6g64/values.yaml similarity index 98% rename from addons/opensearch/2.10/plans/standard-2c4g64/values.yaml rename to addons/opensearch/2.10/plans/standard-2c6g64/values.yaml index 1d7f5782..5f5fda13 100644 --- a/addons/opensearch/2.10/plans/standard-2c4g64/values.yaml +++ b/addons/opensearch/2.10/plans/standard-2c6g64/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: "hb-opensearch-standard-2c4g64" +fullnameOverride: "hb-opensearch-standard-2c6g64" ## @section Master-elegible nodes parameters master: @@ -22,7 +22,7 @@ master: resources: limits: cpu: 2 - memory: 4Gi + memory: 6Gi requests: cpu: 500m memory: 512Mi diff --git a/addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml b/addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml deleted file mode 100644 index 986bed1f..00000000 --- a/addons/opensearch/3.0/plans/standard-2c4g32/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-2c4g32" -id: 3b3addda-c4fc-4a59-bbf5-0f640920e09f -description: "Opensearch standard-2c4g32 plan which limit resources 2 cores 4Gi memory and persistence size 32Gi." -displayName: "standard-2c4g32" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml b/addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml deleted file mode 100644 index 88554185..00000000 --- a/addons/opensearch/3.0/plans/standard-2c4g64/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: "standard-2c4g64" -id: cd949799-61e9-454e-a9df-26299922be5a -description: "Opensearch standard-2c4g64 plan which limit resources 2 cores 4Gi memory and persistence size 64Gi." 
-displayName: "standard-2c4g64" -bindable: true -maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-2c4g32/bind.yaml b/addons/opensearch/3.0/plans/standard-2c6g32/bind.yaml similarity index 100% rename from addons/opensearch/3.0/plans/standard-2c4g32/bind.yaml rename to addons/opensearch/3.0/plans/standard-2c6g32/bind.yaml diff --git a/addons/opensearch/3.0/plans/standard-2c4g32/instance-schema.json b/addons/opensearch/3.0/plans/standard-2c6g32/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-2c4g32/instance-schema.json rename to addons/opensearch/3.0/plans/standard-2c6g32/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-2c6g32/meta.yaml b/addons/opensearch/3.0/plans/standard-2c6g32/meta.yaml new file mode 100644 index 00000000..f8f3d746 --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-2c6g32/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c6g32" +id: 3b3addda-c4fc-4a59-bbf5-0f640920e09f +description: "Opensearch standard-2c6g32 plan which limit resources 2 cores 6Gi memory and persistence size 32Gi." 
+displayName: "standard-2c6g32" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml b/addons/opensearch/3.0/plans/standard-2c6g32/values.yaml similarity index 98% rename from addons/opensearch/3.0/plans/standard-2c4g32/values.yaml rename to addons/opensearch/3.0/plans/standard-2c6g32/values.yaml index 9cb5b8ee..3028fdfd 100644 --- a/addons/opensearch/3.0/plans/standard-2c4g32/values.yaml +++ b/addons/opensearch/3.0/plans/standard-2c6g32/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: "hb-opensearch-standard-2c4g32" +fullnameOverride: "hb-opensearch-standard-2c6g32" ## @section Master-elegible nodes parameters master: @@ -22,7 +22,7 @@ master: resources: limits: cpu: 2 - memory: 4Gi + memory: 6Gi requests: cpu: 500m memory: 512Mi diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/bind.yaml b/addons/opensearch/3.0/plans/standard-2c6g64/bind.yaml similarity index 100% rename from addons/opensearch/3.0/plans/standard-2c4g64/bind.yaml rename to addons/opensearch/3.0/plans/standard-2c6g64/bind.yaml diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/instance-schema.json b/addons/opensearch/3.0/plans/standard-2c6g64/instance-schema.json similarity index 100% rename from addons/opensearch/3.0/plans/standard-2c4g64/instance-schema.json rename to addons/opensearch/3.0/plans/standard-2c6g64/instance-schema.json diff --git a/addons/opensearch/3.0/plans/standard-2c6g64/meta.yaml b/addons/opensearch/3.0/plans/standard-2c6g64/meta.yaml new file mode 100644 index 00000000..8482ff3d --- /dev/null +++ b/addons/opensearch/3.0/plans/standard-2c6g64/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c6g64" +id: cd949799-61e9-454e-a9df-26299922be5a +description: "Opensearch standard-2c6g64 plan which limit resources 2 cores 6Gi memory and persistence size 64Gi." 
+displayName: "standard-2c6g64" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml b/addons/opensearch/3.0/plans/standard-2c6g64/values.yaml similarity index 98% rename from addons/opensearch/3.0/plans/standard-2c4g64/values.yaml rename to addons/opensearch/3.0/plans/standard-2c6g64/values.yaml index a1c5641b..23dad198 100644 --- a/addons/opensearch/3.0/plans/standard-2c4g64/values.yaml +++ b/addons/opensearch/3.0/plans/standard-2c6g64/values.yaml @@ -1,6 +1,6 @@ ## @param fullnameOverride String to fully override common.names.fullname ## -fullnameOverride: "hb-opensearch-standard-2c4g64" +fullnameOverride: "hb-opensearch-standard-2c6g64" ## @section Master-elegible nodes parameters master: @@ -22,7 +22,7 @@ master: resources: limits: cpu: 2 - memory: 4Gi + memory: 6Gi requests: cpu: 500m memory: 512Mi From 0c2759234aea1f1e25c0d61997246e9e02a5a3c7 Mon Sep 17 00:00:00 2001 From: Eamon Date: Thu, 19 Mar 2026 10:18:40 +0800 Subject: [PATCH 90/93] chore(postgresql-cluster): add version 18 (#128) --- addons/index.yaml | 2 + .../18/chart/postgresql-cluster-18/Chart.yaml | 28 ++ .../postgresql-cluster-18/templates/NOTES.txt | 25 + .../templates/_helpers.tpl | 219 +++++++++ .../templates/cm-backup.yaml | 16 + .../templates/cm-logicalbackup .yaml | 19 + .../templates/cm-patroni.yaml | 20 + .../templates/cm-postgresql.yaml | 18 + .../templates/cronjob.yaml | 43 ++ .../templates/logicalbackup-cronjob.yaml | 69 +++ .../templates/networkpolicy.yaml | 54 +++ .../postgresql-cluster-18/templates/role.yaml | 49 ++ .../templates/rolebinding.yaml | 19 + .../postgresql-cluster-18/templates/sec.yaml | 18 + .../templates/serviceaccount.yaml | 12 + .../templates/statefulset.yaml | 273 +++++++++++ .../templates/svc-config.yaml | 11 + .../templates/svc-master.yaml | 24 + .../templates/svc-metrics.yaml | 32 ++ .../templates/svc-relp.yaml | 26 ++ .../postgresql-cluster-18/templates/svc.yaml | 18 + 
.../chart/postgresql-cluster-18/values.yaml | 440 ++++++++++++++++++ addons/postgresql-cluster/18/meta.yaml | 30 ++ .../18/plans/standard-16c64g400/bind.yaml | 41 ++ .../standard-16c64g400/instance-schema.json | 12 + .../18/plans/standard-16c64g400/meta.yaml | 6 + .../18/plans/standard-16c64g400/values.yaml | 81 ++++ .../18/plans/standard-2c4g20/bind.yaml | 41 ++ .../standard-2c4g20/instance-schema.json | 12 + .../18/plans/standard-2c4g20/meta.yaml | 6 + .../18/plans/standard-2c4g20/values.yaml | 81 ++++ .../18/plans/standard-2c8g50/bind.yaml | 41 ++ .../standard-2c8g50/instance-schema.json | 12 + .../18/plans/standard-2c8g50/meta.yaml | 6 + .../18/plans/standard-2c8g50/values.yaml | 83 ++++ .../18/plans/standard-32c128g800/bind.yaml | 41 ++ .../standard-32c128g800/instance-schema.json | 12 + .../18/plans/standard-32c128g800/meta.yaml | 6 + .../18/plans/standard-32c128g800/values.yaml | 82 ++++ .../18/plans/standard-32c64g4000/bind.yaml | 41 ++ .../standard-32c64g4000/instance-schema.json | 12 + .../18/plans/standard-32c64g4000/meta.yaml | 6 + .../18/plans/standard-32c64g4000/values.yaml | 82 ++++ .../18/plans/standard-4c16g100/bind.yaml | 41 ++ .../standard-4c16g100/instance-schema.json | 12 + .../18/plans/standard-4c16g100/meta.yaml | 6 + .../18/plans/standard-4c16g100/values.yaml | 83 ++++ .../18/plans/standard-8c32g200/bind.yaml | 41 ++ .../standard-8c32g200/instance-schema.json | 12 + .../18/plans/standard-8c32g200/meta.yaml | 6 + .../18/plans/standard-8c32g200/values.yaml | 82 ++++ 51 files changed, 2452 insertions(+) create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/Chart.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/NOTES.txt create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/_helpers.tpl create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-backup.yaml create mode 100644 
addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-logicalbackup .yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-patroni.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-postgresql.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cronjob.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/logicalbackup-cronjob.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/networkpolicy.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/role.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/rolebinding.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/sec.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/serviceaccount.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/statefulset.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-config.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-master.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-metrics.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-relp.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc.yaml create mode 100644 addons/postgresql-cluster/18/chart/postgresql-cluster-18/values.yaml create mode 100644 addons/postgresql-cluster/18/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-16c64g400/bind.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-16c64g400/instance-schema.json create mode 100644 
addons/postgresql-cluster/18/plans/standard-16c64g400/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-16c64g400/values.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-2c4g20/bind.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-2c4g20/instance-schema.json create mode 100644 addons/postgresql-cluster/18/plans/standard-2c4g20/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-2c4g20/values.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-2c8g50/bind.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-2c8g50/instance-schema.json create mode 100644 addons/postgresql-cluster/18/plans/standard-2c8g50/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-2c8g50/values.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-32c128g800/bind.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-32c128g800/instance-schema.json create mode 100644 addons/postgresql-cluster/18/plans/standard-32c128g800/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-32c128g800/values.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-32c64g4000/bind.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-32c64g4000/instance-schema.json create mode 100644 addons/postgresql-cluster/18/plans/standard-32c64g4000/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-32c64g4000/values.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-4c16g100/bind.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-4c16g100/instance-schema.json create mode 100644 addons/postgresql-cluster/18/plans/standard-4c16g100/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-4c16g100/values.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-8c32g200/bind.yaml create mode 100644 
addons/postgresql-cluster/18/plans/standard-8c32g200/instance-schema.json create mode 100644 addons/postgresql-cluster/18/plans/standard-8c32g200/meta.yaml create mode 100644 addons/postgresql-cluster/18/plans/standard-8c32g200/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index 2bfb50af..c5e13ead 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -49,6 +49,8 @@ entries: description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." - version: 17 description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." + - version: 18 + description: "PostgreSQL is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance." seaweedfs: - version: 3 description: "SeaweedFS is a fast distributed storage system for blobs, objects, files, and data lake, for billions of files." diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/Chart.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/Chart.yaml new file mode 100644 index 00000000..d3b5cd09 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: "18" +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.1 +description: PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. 
+engine: gotpl +home: https://github.com/drycc/charts/tree/master/drycc/postgresql +icon: https://drycc.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png +keywords: + - postgresql + - postgres + - database + - sql + - replication + - cluster + - patroni +maintainers: + - email: zhang.eamon@hotmail.com + name: zhangeamon +name: postgresql +sources: + - https://github.com/drycc-addons/ + - https://www.postgresql.org/ +version: "18.6" diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/NOTES.txt b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/NOTES.txt new file mode 100644 index 00000000..22a4f2d2 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/NOTES.txt @@ -0,0 +1,25 @@ +Patroni can be accessed via port 5432 on the following DNS name from within your cluster: +{{ template "patroni.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To get your password for superuser run: + + # superuser password + PGPASSWORD_SUPERUSER=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "patroni.fullname" . }} -o jsonpath="{.data.password-superuser}" | base64 --decode) + + # admin password + PGPASSWORD_ADMIN=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "patroni.fullname" . }} -o jsonpath="{.data.password-admin}" | base64 --decode) + +To connect to your database: + +1. Run a postgres pod and connect using the psql cli: + # login as superuser + kubectl run -i --tty --rm psql --image=postgres \ + --env "PGPASSWORD=$PGPASSWORD_SUPERUSER" \ + --command -- psql -U postgres \ + -h {{ template "patroni.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local postgres + + # login as admin + kubectl run -i -tty --rm psql --image=postgres \ + --env "PGPASSWORD=$PGPASSWORD_ADMIN" \ + --command -- psql -U admin \ + -h {{ template "patroni.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local postgres diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/_helpers.tpl b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/_helpers.tpl new file mode 100644 index 00000000..d5876632 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/_helpers.tpl @@ -0,0 +1,219 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "patroni.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "patroni.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "patroni.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use. +*/}} +{{- define "patroni.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "patroni.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a cronjob object should be created for Postgresql HA patroni ## TODO feature +*/}} +{{- define "patroni.createCronJob" -}} +{{- if and .Values.backup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a cronjob object should be created for Postgresql HA patroni ## TODO feature +*/}} +{{- define "patroni.createLogicalBackupCronJob" -}} +{{- if and .Values.logicalbackup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for Postgresql HA patroni +*/}} +{{- define "patroni.createConfigmap" -}} +{{- if and .Values.preInitScript }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Create patroni envs. +*/}} +{{- define "patroni.envs" }} +{{- if .Values.kubernetes.configmaps.enable }} +- name: KUBERNETES_USE_CONFIGMAPS + value: "true" +{{- end }} +{{- if .Values.kubernetes.endpoints.enable }} +- name: PATRONI_KUBERNETES_USE_ENDPOINTS + value: 'true' +{{- end }} +- name: PATRONI_KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP +- name: PATRONI_KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +- name: PATRONI_KUBERNETES_BYPASS_API_SERVICE + value: 'true' +- name: PATRONI_KUBERNETES_LABELS + value: '{application: {{ template "patroni.fullname" . }},release: {{ .Release.Name }},cluster-name: {{ template "patroni.fullname" . }}}' +- name: PATRONI_SUPERUSER_USERNAME + value: postgres +- name: PATRONI_SUPERUSER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser +- name: PATRONI_REPLICATION_USERNAME + value: standby +- name: PATRONI_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . 
}} + key: password-replication +- name: PATRONI_REWIND_USERNAME + value: rewinder +- name: PATRONI_REWIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-rewind +- name: ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: admin-user +- name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: admin-password +- name: PATRONI_SCOPE + value: {{ template "patroni.fullname" . }} +- name: PATRONI_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name +- name: PATRONI_POSTGRESQL_DATA_DIR + value: "{{ .Values.persistentVolume.mountPath }}/data" +- name: PGDATA + value: "{{ .Values.persistentVolume.mountPath }}/data" +- name: PATRONI_POSTGRESQL_PGPASS + value: /tmp/pgpass +- name: PATRONI_POSTGRESQL_LISTEN + value: '0.0.0.0:5432' +- name: PATRONI_RESTAPI_LISTEN + value: '0.0.0.0:8008' +{{- end -}} + +{{/* +Return true if a configmap object should be created for PG backup. +*/}} +{{- define "backup.createConfigmap" -}} +{{- if and .Values.backup.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Generate random password +*/}} + +{{/* +Get the super user password ; +*/}} +{{- define "credentials.superuserValue" }} +{{- if .Values.credentials.superuser }} + {{- .Values.credentials.superuser -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "password-superuser") -}} +{{- end -}} +{{- end }} + +{{/* +Get the rewind password ; +*/}} +{{- define "credentials.rewindValue" }} +{{- if .Values.credentials.rewind }} + {{- .Values.credentials.rewind -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) 
"Length" 10 "Key" "password-rewind") -}} +{{- end -}} +{{- end }} + +{{/* +Get the replication password ; +*/}} +{{- define "credentials.replicationValue" }} +{{- if .Values.credentials.replication }} + {{- .Values.credentials.replication -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "password-replication") -}} +{{- end -}} +{{- end }} + +{{/* +Get the administrator password ; +*/}} +{{- define "adminRole.passwordValue" }} +{{- if .Values.adminRole.password }} + {{- .Values.adminRole.password -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "admin-password") -}} +{{- end -}} +{{- end }} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} +{{- $len := (default 16 .Length) | int -}} +{{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} +{{- if $obj }} +{{- index $obj .Key | b64dec -}} +{{- else -}} +{{- randAlphaNum $len -}} +{{- end -}} +{{- end }} + diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-backup.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-backup.yaml new file mode 100644 index 00000000..fdc62197 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-backup.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-backup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + backup.env: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.backupEnv "context" $ ) | nindent 4 }} diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-logicalbackup .yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-logicalbackup .yaml new file mode 100644 index 00000000..8de61100 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-logicalbackup .yaml @@ -0,0 +1,19 @@ +{{- if (include "patroni.createLogicalBackupCronJob" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-logicalbackup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + logicalbackup.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.logicalbackupScript "context" $ ) | nindent 4 }} + +{{- end }} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-patroni.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-patroni.yaml new file mode 100644 index 00000000..ad4b5849 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-patroni.yaml @@ -0,0 +1,20 @@ +{{- if (include "patroni.createConfigmap" .) 
}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-patroni + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + pre_init.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.preInitScript "context" $ ) | nindent 4 }} + post_init.sh: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.postInitScript "context" $ ) | nindent 4 }} +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-postgresql.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-postgresql.yaml new file mode 100644 index 00000000..8aba698a --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cm-postgresql.yaml @@ -0,0 +1,18 @@ +{{- if (include "patroni.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-postgresql + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom_conf.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.postgresql.config "context" $ ) | nindent 4 }} +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cronjob.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cronjob.yaml new file mode 100644 index 00000000..495dfa7b --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/cronjob.yaml @@ -0,0 +1,43 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- if (include "patroni.createCronJob" .) }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ include "patroni.fullname" . }}-backup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + schedule: "{{ .Values.backup.scheduleCronJob }}" + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: {{ .Chart.Name }}-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /usr/bin/env + - bash + - -c + - | + curl "http://${REPLHOST}:9000/pg_backup" + env: + - name: REPLHOST + value: {{ include "patroni.fullname" . }}-repl +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/logicalbackup-cronjob.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/logicalbackup-cronjob.yaml new file mode 100644 index 00000000..071b9bd9 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/logicalbackup-cronjob.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} +{{- if (include "patroni.createLogicalBackupCronJob" .) }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ include "patroni.fullname" . }}-logicalbackup + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + schedule: "{{ .Values.logicalbackup.scheduleCronJob }}" + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: {{ .Chart.Name }}-logicalbackup + image: "{{ .Values.logicalbackupImages.repository }}:{{ .Values.logicalbackupImages.tag }}" + imagePullPolicy: {{ .Values.logicalbackupImages.pullPolicy | quote }} + command: + - /usr/bin/env + - bash + - -c + - | + sh /opt/drycc/logicalbackup/logicalbackup.sh + env: + - name: PGHOST + value: {{ include "patroni.fullname" . }}-repl + - name: PGPORT + value: "5432" + - name: PGUSER + value: postgres + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser + - name: MINIO_BUCKET + value: {{ .Values.logicalbackup.minio.bucket }} + - name: MINIO_HOST + value: {{ .Values.logicalbackup.minio.endpoint }} + - name: MINIO_ACCESS_KEY + value: {{ .Values.logicalbackup.minio.access_key }} + - name: MINIO_SECRET_KEY + value: {{ .Values.logicalbackup.minio.secret_key }} + + volumeMounts: + - mountPath: "/opt/drycc/logicalbackup/" + name: logicalbackup-config + + volumes: + - name: logicalbackup-config + configMap: + name: {{ template "common.names.fullname" . 
}}-logicalbackup +{{- end -}} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/networkpolicy.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/networkpolicy.yaml new file mode 100644 index 00000000..19ff2288 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/networkpolicy.yaml @@ -0,0 +1,54 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: patroni + cluster-name: {{ template "patroni.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if eq .Values.service.type "ClusterIP" }} + ingress: + # Allow inbound connections + - ports: + - port: 5432 + - port: 9000 + - port: 80 + - port: 8008 + {{- if and .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{ end }} + {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: backup + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + ingress: + - {} + {{- end }} +{{- end }} diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/role.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/role.yaml new file mode 100644 index 00000000..8dec5309 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/role.yaml @@ -0,0 +1,49 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: + - create + - get + - list + - patch + - update + - watch + # delete is required only for 'patronictl remove' + - delete +- apiGroups: [""] + resources: ["services"] + verbs: + - create +- apiGroups: [""] + resources: ["endpoints"] + verbs: + - create + - get + - patch + - update + # the following three privileges are necessary only when using endpoints + - list + - watch + # delete is required only for for 'patronictl remove' + - delete + - deletecollection +- apiGroups: [""] + resources: ["pods"] + verbs: + - get + - list + - patch + - update + - watch +{{- end }} diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/rolebinding.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/rolebinding.yaml new file mode 100644 index 00000000..5e15948f --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: {{ template "patroni.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "patroni.fullname" . 
}} +{{- end }} diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/sec.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/sec.yaml new file mode 100644 index 00000000..c2e13055 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/sec.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} +type: Opaque +data: + password-superuser: {{ include "credentials.superuserValue" . | b64enc | quote }} + password-rewind: {{ include "credentials.rewindValue" . | b64enc | quote }} + password-replication: {{ include "credentials.replicationValue" . | b64enc | quote }} + admin-user: {{ .Values.adminRole.username | b64enc | quote }} + admin-password: {{ include "adminRole.passwordValue" . | b64enc | quote }} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/serviceaccount.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/serviceaccount.yaml new file mode 100644 index 00000000..e1b2ebf6 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "patroni.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end }} diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/statefulset.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/statefulset.yaml new file mode 100644 index 00000000..3f1efb1b --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/statefulset.yaml @@ -0,0 +1,273 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} +spec: + serviceName: {{ template "patroni.fullname" . }} + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} + template: + metadata: + name: {{ template "patroni.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} + spec: + {{- if .Values.patroni.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.patroni.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.patroni.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.patroni.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.patroni.nodeAffinityPreset.type "key" .Values.patroni.nodeAffinityPreset.key "values" .Values.patroni.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.patroni.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.patroni.nodeSelector "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "patroni.serviceAccountName" . }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + env: + {{- include "patroni.envs" . 
| indent 8 }} + {{- if .Values.env }} + {{- range $key, $val := .Values.env }} + - name: {{ $key | quote | upper }} + value: {{ $val | quote }} + {{- end }} + {{- end }} + readinessProbe: + httpGet: + scheme: HTTP + path: /readiness + port: 8008 + initialDelaySeconds: 3 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /liveness + port: 8008 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /usr/bin/env + - bash + - -c + - | + # switch leader pod if the current pod is the leader + if curl --fail http://localhost:8008/read-write; then + init-stack patronictl switchover --force + fi + ports: + - containerPort: 8008 + protocol: TCP + - containerPort: 5432 + protocol: TCP + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + - mountPath: "/opt/drycc/postgresql/scripts/" + name: patroni-config + - mountPath: "/opt/drycc/postgresql/config/" + name: postgresql-config + # readOnly: true + - mountPath: "/opt/drycc/postgresql/backup/" + name: backup-config + - name: dshm + mountPath: /dev/shm + # readOnly: true + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: "{{ .Values.metrics.image.repository }}:{{ .Values.metrics.image.tag }}" + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if 
.Values.metrics.customMetrics }} + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + env: + - name: DATA_SOURCE_NAME + value: {{ printf "postgresql://tea_mon:password@127.0.0.1:5432/postgres?sslmode=disable" }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPort }} + startupProbe: + initialDelaySeconds: 10 + tcpSocket: + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: http-metrics + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + - name: {{ .Chart.Name }}-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + # securityContext: + # runAsUser: postgres + # fsGroup: postgres + command: + - /usr/bin/env + - bash + - -c + - | + python3 /opt/drycc/postgresql/pgbackup.py 0.0.0.0 9000 + env: + - name: PGHOST + value: localhost + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ template "patroni.fullname" . }} + key: password-superuser + - name: PGUSER + value: postgres + - name: PGDATABASE + value: postgres + - name: PGPORT + value: "5432" + - name: PGDATA + value: "{{ .Values.persistentVolume.mountPath }}/data" + ports: + - containerPort: 9000 + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + - mountPath: "/opt/drycc/postgresql/backup/" + name: backup-config + + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ .Values.affinity | toYaml | indent 8 }} + {{- else if .Values.affinityTemplate }} + affinity: +{{ tpl .Values.affinityTemplate . | indent 8 }} + {{- end }} + volumes: + - name: patroni-config + configMap: + name: {{ template "common.names.fullname" . }}-patroni + - name: postgresql-config + configMap: + name: {{ template "common.names.fullname" . }}-postgresql + - name: backup-config + configMap: + name: {{ template "common.names.fullname" . }}-backup + {{- if not .Values.persistentVolume.enabled }} + - name: storage-volume + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + + {{- if .Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + {{- if .Values.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + annotations: + {{- if .Values.persistentVolume.annotations }} +{{ toYaml .Values.persistentVolume.annotations | indent 10 }} + {{- end }} + labels: + application: {{ template "patroni.fullname" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: + accessModes: +{{ toYaml .Values.persistentVolume.accessModes | indent 8 }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-config.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-config.yaml new file mode 100644 index 00000000..5f7b0f60 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-config.yaml @@ -0,0 +1,11 @@ +# headless service to avoid deletion of patronidemo-config endpoint +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-config + labels: + application: {{ template "patroni.fullname" . }} + release: {{ .Release.Name }} + cluster-name: {{ template "patroni.fullname" . }} +spec: + clusterIP: None diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-master.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-master.yaml new file mode 100644 index 00000000..609ed5ba --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-master.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-master + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} + role: primary +spec: + type: {{ .Values.service.type }} + selector: + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . }} + role: primary + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + protocol: TCP \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-metrics.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-metrics.yaml new file mode 100644 index 00000000..862c6a0c --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-metrics.yaml @@ -0,0 +1,31 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "patroni.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" .
| nindent 4 }} + app.kubernetes.io/component: patroni +{{- end }} diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-relp.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-relp.yaml new file mode 100644 index 00000000..252882b3 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc-relp.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }}-repl + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . }} + role: replica +spec: + type: {{ .Values.service.type }} + selector: + application: {{ template "patroni.fullname" . }} + cluster-name: {{ template "patroni.fullname" . }} + role: replica + ports: + - name: postgresql + port: 5432 + targetPort: 5432 + - name: pgbackup + port: 9000 + targetPort: 9000 \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc.yaml new file mode 100644 index 00000000..ac0c2c44 --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/templates/svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "patroni.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: patroni + application: {{ template "patroni.fullname" . }} + chart: {{ template "patroni.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + cluster-name: {{ template "patroni.fullname" . 
}} +spec: + type: ClusterIP + ports: + - port: 5432 + targetPort: 5432 + protocol: TCP \ No newline at end of file diff --git a/addons/postgresql-cluster/18/chart/postgresql-cluster-18/values.yaml b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/values.yaml new file mode 100644 index 00000000..1ecfd7cc --- /dev/null +++ b/addons/postgresql-cluster/18/chart/postgresql-cluster-18/values.yaml @@ -0,0 +1,440 @@ +replicaCount: 3 +diagnosticMode: + enabled: false + +service: + type: ClusterIP + +image: + # Image was built from registry.drycc.cc/drycc-addons/patroni:3.2 + # https://github.com/zalando/spilo/tree/master/postgres-appliance + repository: registry.drycc.cc/drycc-addons/postgresql-patroni + tag: 18 + # IfNotPresent , Always + pullPolicy: "IfNotPresent" + +logicalbackupImages: + repository: registry.drycc.cc/drycc-addons/postgresql-logicalbackup + tag: 18 + # IfNotPresent , Always + pullPolicy: "IfNotPresent" + +# Credentials used by Patroni , passwd +# https://github.com/zalando/patroni/blob/master/docs/SETTINGS.rst#postgresql +# https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst +credentials: + superuser: "" + rewind: "" + replication: "" + +adminRole: + username: administrator + password: "" + +# Distribution Configuration stores +# Please note that only one of the following stores should be enabled. +kubernetes: + endpoints: + enable: true + configmaps: + enable: false + +# Extra custom environment variables.
+env: {} + +# +#custom patroni.yaml used by patroni boot +# configuration: {} +preInitScript: | + mkdir -p /home/postgres/pgdata/log + ln -sf /dev/stdout "/home/postgres/pgdata/log/postgresql.csv" + cat > /opt/drycc/postgresql/patroni.yml <<__EOF__ + log: + level: INFO + restapi: + listen: 0.0.0.0:8008 + connect_address: 0.0.0.0:8008 + bootstrap: + dcs: + ttl: 30 + loop_wait: 10 + retry_timeout: 10 + maximum_lag_on_failover: 1048576 + failsafe_mode: true + postgresql: + use_pg_rewind: true + use_slots: true + pg_hba: + - local all all peer + - host all tea_mon 127.0.0.1/32 trust + - host all all 0.0.0.0/0 scram-sha-256 + - host replication ${PATRONI_REPLICATION_USERNAME} 0.0.0.0/0 scram-sha-256 + - host replication postgres 0.0.0.0/0 scram-sha-256 + custom_conf: '/opt/drycc/postgresql/config/custom_conf.conf' + parameters: + max_connections: {{ .Values.patroni.pgParameters.max_connections }} + max_worker_processes: {{ .Values.patroni.pgParameters.max_worker_processes }} + max_parallel_workers: {{ .Values.patroni.pgParameters.max_parallel_workers }} + wal_level: logical + hot_standby: "on" + max_wal_senders: 10 + max_replication_slots: 10 + hot_standby_feedback: on + max_prepared_transactions: 0 + max_locks_per_transaction: 64 + wal_log_hints: "on" + wal_keep_size: "1 GB" + max_slot_wal_keep_size: {{ .Values.patroni.pgParameters.max_slot_wal_keep_size | quote }} + track_commit_timestamp: "off" + archive_mode: "on" + archive_timeout: 300s + archive_command: sh /opt/drycc/postgresql/walbackup.sh %p + # timescaledb.license: 'timescale' + shared_preload_libraries: 'auto_explain,pg_stat_statements,timescaledb' + log_destination: 'csvlog' + log_filename: postgresql.log + logging_collector: on + log_directory: /home/postgres/pgdata/log + log_min_messages: 'info' + log_min_duration_statement: 1000 + log_lock_waits: on + log_statement: 'ddl' + {{ if .Values.postgresql.timezone -}} timezone: {{ .Values.postgresql.timezone }} {{- end }} + initdb: + - auth-host: scram-sha-256 + 
- auth-local: trust + - encoding: UTF8 + - locale: en_US.UTF-8 + - data-checksums + post_bootstrap: sh /opt/drycc/postgresql/scripts/post_init.sh + restapi: + connect_address: '${PATRONI_KUBERNETES_POD_IP}:8008' + postgresql: + connect_address: '${PATRONI_KUBERNETES_POD_IP}:5432' + authentication: + superuser: + username: postgres + password: '${PATRONI_SUPERUSER_PASSWORD}' + replication: + username: standby + password: '${PATRONI_REPLICATION_PASSWORD}' + rewind: # Has no effect on postgres 10 and lower + username: rewinder + password: '${PATRONI_REWIND_PASSWORD}' + watchdog: + mode: off + __EOF__ + +postInitScript: | + #!/bin/bash + set -Eeu + # Create monitor user + psql -w -c "CREATE USER tea_mon ;GRANT pg_monitor TO tea_mon ;create extension pg_stat_statements;create extension pg_buffercache ;" + # Create admin user + if [[( -n "$ADMIN_USER") && ( -n "$ADMIN_PASSWORD")]]; then + + echo "Creating user ${ADMIN_USER}" + psql -w -c "CREATE USER ${ADMIN_USER} WITH SUPERUSER CREATEDB CREATEROLE CONNECTION LIMIT 10 LOGIN ENCRYPTED PASSWORD '${ADMIN_PASSWORD}'" + + else + echo "Skipping create admin user" + fi + psql -w -c "CHECKPOINT;CHECKPOINT;" + +backupEnv: | + #!/bin/bash + export USE_WALG={{ .Values.backup.enabled | quote }} + export BACKUP_NUM_TO_RETAIN={{ .Values.backup.retainBackups | quote}} + export WALG_BACKUP_THRESHOLD_MEGABYTES={{ .Values.backup.backupThresholdMegabytes | quote }} + export WALE_BACKUP_THRESHOLD_PERCENTAGE={{ .Values.backup.backupThresholdPercentage | quote }} + export AWS_ACCESS_KEY_ID={{ .Values.backup.s3.awsAccessKeyID | quote }} + export AWS_SECRET_ACCESS_KEY={{ .Values.backup.s3.awsSecretAccessKey | quote }} + export WALG_S3_PREFIX={{ .Values.backup.s3.walGS3Prefix | quote }} + export AWS_ENDPOINT={{ .Values.backup.s3.awsEndpoint | quote }} + export AWS_S3_FORCE_PATH_STYLE={{ .Values.backup.s3.awsS3ForcePathStyle | quote }} + export AWS_REGION={{ .Values.backup.s3.awsRegion | quote }} + +logicalbackupScript: | + #!/bin/bash + + # 
PostgreSQL 设置 + # POSTGRES_USER="postgres" + # POSTGRES_HOST="127.0.0.1" + + # MinIO 设置 + # MINIO_BUCKET="pgbackup" + # MINIO_HOST="http://localhost:9000" + # MINIO_ACCESS_KEY="admin123" + # MINIO_SECRET_KEY="admin123" + + # 设置 MinIO 客户端别名 + mc alias set myminio $MINIO_HOST $MINIO_ACCESS_KEY $MINIO_SECRET_KEY + + # 创建以当前日期和时间命名的备份目录 + BACKUP_DIR="$(date +%Y%m%d%H%M)" + MINIO_PATH="myminio/$MINIO_BUCKET/$BACKUP_DIR" + + # 备份全局对象 + echo "Backing up global objects to $MINIO_PATH/roles_globals.sql.gz" + pg_dumpall -g -U "$POSTGRES_USER" -h "$POSTGRES_HOST" | pigz | mc pipe "$MINIO_PATH/roles_globals.sql.gz" + + # 获取所有非模板数据库的列表 + DATABASES=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;") + + # 为每个数据库执行备份 + for DB in $DATABASES; do + echo "Backing up $DB to $MINIO_PATH/$DB.sql.gz" + pg_dump -U "$POSTGRES_USER" -h "$POSTGRES_HOST" "$DB" | pigz | mc pipe "$MINIO_PATH/$DB.sql.gz" + done + + echo "Backup process completed!" + +postgresql: + timezone: + config: |- + log_min_duration_statement = 1000 + max_wal_size = 4GB + min_wal_size = 4GB + max_wal_senders = 10 + max_replication_slots = 10 + max_prepared_transactions = 0 + max_locks_per_transaction = 64 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: "2 GB" + + ## @param patroni.podAnnotations Additional pod annotations for Postgresql patroni pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param patroni.podAffinityPreset Postgresql patroni pod affinity preset. Ignored if `patroni.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param patroni.podAntiAffinityPreset Postgresql patroni pod anti-affinity preset. Ignored if `patroni.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Postgresql Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param patroni.nodeAffinityPreset.type Postgresql patroni node affinity preset type. Ignored if `patroni.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param patroni.nodeAffinityPreset.key Postgresql patroni node label key to match Ignored if `patroni.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param patroni.nodeAffinityPreset.values Postgresql patroni node label values to match. Ignored if `patroni.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param patroni.affinity Affinity for Postgresql patroni pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param patroni.nodeSelector Node labels for Postgresql patroni pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + +## Postgresql Prometheus exporter parameters +## +metrics: + enabled: true + image: + repository: registry.drycc.cc/drycc-addons/postgres-exporter + tag: "0" + # IfNotPresent , Always + pullPolicy: "IfNotPresent" + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database:.... 
+ ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + service: + ports: + metrics: 9187 + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + + customMetrics: {} + containerPort: 9187 + containerSecurityContext: + enabled: false + runAsUser: 1001 + runAsNonRoot: true + customLivenessProbe: {} + customReadinessProbe: + enabled: true + resources: + limits: + cpu: 100m + hugepages-2Mi: 20Mi + memory: 512Mi + requests: + cpu: 100m + memory: 512Mi + +logicalbackup: + enabled: false + scheduleCronJob: "22 0 * * 0" + minio: + used: true + buckect: "s3://xx" + access_key: "" + secret_key: "" + endpoint: "http://xxxx:9000" + awsS3ForcePathStyle: "true" + awsRegion: dx-1 + +backup: + # Specifies whether Wal-G should be enabled + enabled: false + # Cron schedule for doing base backups + scheduleCronJob: "22 0 * * 0" + # Amount of base backups to retain + retainBackups: 2 + # Name of the secret that holds the credentials to the bucket + kubernetesSecret: + # Maximum size of the WAL segments accumulated after the base backup to + # consider WAL-G restore instead of pg_basebackup + backupThresholdMegabytes: 1024 + # Maximum ratio (in percents) of the accumulated WAL files to 
the base backup + # to consider WAL-G restore instead of pg_basebackup + backupThresholdPercentage: 30 + s3: + used: true + awsAccessKeyID: "" + awsSecretAccessKey: "" + walGS3Prefix: "s3://xx" + awsEndpoint: "http://xxxx:9000" + awsS3ForcePathStyle: "true" + awsRegion: dx-1 + +logicalBackup: + enabled: false + +## persistentVolumeClaimRetentionPolicy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention +## @param persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet +## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced +## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted +persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete +persistentVolume: + enabled: true + size: 10G + ## database data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + subPath: "" + mountPath: "/home/postgres/pgdata" + annotations: {} + accessModes: + - ReadWriteOnce + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 100m + hugepages-2Mi: 4Mi + memory: 512Mi + requests: + cpu: 100m + memory: 512Mi + +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "1Gi" + +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinityTemplate: | + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + application: {{ template "patroni.name" . }} + release: {{ .Release.Name | quote }} +affinity: {} +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: +## Postgresql Nework Policy configuration +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param networkPolicy.allowExternal The Policy model to apply. + ## When set to false, only pods with the correct + ## client label will have network access to the port Postgresql is listening + ## on. When true, Postgresql will accept connections from any source + ## (with the correct destination port). 
+ ## + allowCurrentNamespace: true + allowNamespaces: +clusterDomain: cluster.local diff --git a/addons/postgresql-cluster/18/meta.yaml b/addons/postgresql-cluster/18/meta.yaml new file mode 100644 index 00000000..df9589f6 --- /dev/null +++ b/addons/postgresql-cluster/18/meta.yaml @@ -0,0 +1,30 @@ +name: postgresql-cluster-18 +version: 18 +id: d0c8ade8-6950-4efa-89b3-f6da5f0a36dc +description: "postgresql-cluster-18" +displayName: "postgresql-cluster-18" +metadata: + displayName: "postgresql-cluster-18" + provider: + name: drycc + supportURL: https://www.postgresql.org/ + documentationURL: https://github.com/drycc-addons/drycc-docker-postgresql-cluster +tags: postgresql-cluster +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: + - name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" + - name: "service.type" + required: false + description: "service type config for values.yaml" + - name: "backup" + required: false + description: "Whether to use S3 for backup your data. default false . ps: Make sure there is a available S3 " + - name: "logicalbackup" + required: false + description: "Whether to use S3 for logical backup your data. default false . ps: Make sure there is a available S3 " +archive: false diff --git a/addons/postgresql-cluster/18/plans/standard-16c64g400/bind.yaml b/addons/postgresql-cluster/18/plans/standard-16c64g400/bind.yaml new file mode 100644 index 00000000..55955ed6 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-16c64g400/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-16c64g400/instance-schema.json b/addons/postgresql-cluster/18/plans/standard-16c64g400/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-16c64g400/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-16c64g400/meta.yaml b/addons/postgresql-cluster/18/plans/standard-16c64g400/meta.yaml new file mode 100644 index 00000000..2089565d --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-16c64g400/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c64g400" +id: 6f846a7c-0495-4810-a768-6769b1f6430e +description: "PostgreSQL Cluster standard-16c64g400 
plan: Disk 400Gi ,vCPUs 16 , RAM 64G , DB MAX Connection 2000" +displayName: "standard-16c64g400" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/18/plans/standard-16c64g400/values.yaml b/addons/postgresql-cluster/18/plans/standard-16c64g400/values.yaml new file mode 100644 index 00000000..72b92be7 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-16c64g400/values.yaml @@ -0,0 +1,81 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-400 +patroni: + pgParameters: + max_worker_processes: 32 + max_parallel_workers: 16 + max_connections: 2000 + max_slot_wal_keep_size: "10 GB" + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '16384 MB' + work_mem = '32 MB' + maintenance_work_mem = '520 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '45 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '16 GB' + min_wal_size = '2 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + 
bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 8 + max_parallel_maintenance_workers = 8 + max_parallel_workers = 16 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = off + max_slot_wal_keep_size = 40GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 16000m + hugepages-2Mi: 40Mi + memory: 64Gi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 400Gi + +shmVolume: + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/18/plans/standard-2c4g20/bind.yaml b/addons/postgresql-cluster/18/plans/standard-2c4g20/bind.yaml new file mode 100644 index 00000000..55955ed6 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c4g20/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . 
}}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-2c4g20/instance-schema.json b/addons/postgresql-cluster/18/plans/standard-2c4g20/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c4g20/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-2c4g20/meta.yaml b/addons/postgresql-cluster/18/plans/standard-2c4g20/meta.yaml new file mode 100644 index 00000000..14c20506 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c4g20/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g20" +id: 83de47d0-97c8-4133-a633-d1f9c0c2cc83 +description: "PostgreSQL Cluster standard-2c4g20 plan: Disk 20Gi ,vCPUs 2 , RAM 4G , DB MAX Connection 1000" +displayName: "standard-2c4g20" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/18/plans/standard-2c4g20/values.yaml b/addons/postgresql-cluster/18/plans/standard-2c4g20/values.yaml new file mode 100644 index 00000000..098a0a08 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c4g20/values.yaml @@ -0,0 +1,81 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-20 + +patroni: + pgParameters: + 
max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 1000 + max_slot_wal_keep_size: "2 GB" + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '1024 MB' + work_mem = '32 MB' + maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '3 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '15 min' + checkpoint_completion_target = 0.9 + max_wal_size = '2 GB' + min_wal_size = '1 GB' + + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 1 + max_parallel_maintenance_workers = 1 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = off + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 2000m + hugepages-2Mi: 20Mi + memory: 4Gi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 20Gi + +shmVolume: + sizeLimit: "2Gi" diff --git a/addons/postgresql-cluster/18/plans/standard-2c8g50/bind.yaml b/addons/postgresql-cluster/18/plans/standard-2c8g50/bind.yaml new file mode 100644 index 00000000..55955ed6 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c8g50/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-2c8g50/instance-schema.json b/addons/postgresql-cluster/18/plans/standard-2c8g50/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c8g50/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-2c8g50/meta.yaml b/addons/postgresql-cluster/18/plans/standard-2c8g50/meta.yaml new file mode 100644 index 00000000..175d2365 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c8g50/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c8g50" +id: 108376d3-0f87-4226-8e80-f200022383aa +description: "PostgreSQL Cluster standard-2c8g50 plan: Disk 50Gi ,vCPUs 2 , RAM 8G , DB MAX Connection 2000" +displayName: "standard-2c8g50" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/18/plans/standard-2c8g50/values.yaml b/addons/postgresql-cluster/18/plans/standard-2c8g50/values.yaml new file mode 100644 index 00000000..affca346 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-2c8g50/values.yaml @@ -0,0 +1,83 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-50 + +patroni: + pgParameters: + max_worker_processes: 4 + max_parallel_workers: 2 + max_connections: 2000 + max_slot_wal_keep_size: "5 GB" + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '2048 MB' + work_mem = '32 MB' + 
maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '6 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '15 min' + checkpoint_completion_target = 0.9 + max_wal_size = '4 GB' + min_wal_size = '1 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 1 + max_parallel_maintenance_workers = 1 + max_parallel_workers = 2 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = off + max_slot_wal_keep_size = 5GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 2000m + hugepages-2Mi: 20Mi + memory: 8Gi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 50Gi + +shmVolume: + sizeLimit: "4Gi" diff --git a/addons/postgresql-cluster/18/plans/standard-32c128g800/bind.yaml b/addons/postgresql-cluster/18/plans/standard-32c128g800/bind.yaml new file mode 100644 index 00000000..55955ed6 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c128g800/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-32c128g800/instance-schema.json b/addons/postgresql-cluster/18/plans/standard-32c128g800/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c128g800/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-32c128g800/meta.yaml b/addons/postgresql-cluster/18/plans/standard-32c128g800/meta.yaml new file mode 100644 index 00000000..a2e3bd11 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c128g800/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c128g800" +id: 9f2af58a-6e67-4df7-8092-17c5ca94c357 +description: "PostgreSQL Cluster standard-32c128g800 plan: Disk 800Gi ,vCPUs 32 , RAM 128G , DB MAX Connection 2000" +displayName: "standard-32c128g800" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/18/plans/standard-32c128g800/values.yaml b/addons/postgresql-cluster/18/plans/standard-32c128g800/values.yaml new file mode 100644 index 00000000..c2ca0477 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c128g800/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-800 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: "100 GB" + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + 
shared_buffers = '32768 MB' + work_mem = '64 MB' + maintenance_work_mem = '720 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '90 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '64 GB' + min_wal_size = '4 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 16 + max_parallel_maintenance_workers = 16 + max_parallel_workers = 32 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = off + max_slot_wal_keep_size = 80GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 32000m + memory: 128Gi + hugepages-2Mi: 80Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 800Gi + +shmVolume: + sizeLimit: "64Gi" diff --git a/addons/postgresql-cluster/18/plans/standard-32c64g4000/bind.yaml b/addons/postgresql-cluster/18/plans/standard-32c64g4000/bind.yaml new file mode 100644 index 00000000..55955ed6 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c64g4000/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-32c64g4000/instance-schema.json b/addons/postgresql-cluster/18/plans/standard-32c64g4000/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c64g4000/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-32c64g4000/meta.yaml b/addons/postgresql-cluster/18/plans/standard-32c64g4000/meta.yaml new file mode 100644 index 00000000..36707578 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c64g4000/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-32c64g4000" +id: 71afa502-7b03-41da-b9f7-61d8d7168dd6 +description: "PostgreSQL Cluster standard-32c64g4000 plan: Disk 4Ti ,vCPUs 32 , RAM 64G , DB MAX Connection 2000" +displayName: "standard-32c64g4000" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/18/plans/standard-32c64g4000/values.yaml b/addons/postgresql-cluster/18/plans/standard-32c64g4000/values.yaml new file mode 100644 index 00000000..56fc31f5 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-32c64g4000/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-800 + +patroni: + pgParameters: + max_worker_processes: 64 + max_parallel_workers: 32 + max_connections: 2000 + max_slot_wal_keep_size: "200 GB" + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + 
shared_buffers = '16384 MB' + work_mem = '256 MB' + maintenance_work_mem = '2048 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '45 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '30 min' + checkpoint_completion_target = 0.9 + max_wal_size = '32 GB' + min_wal_size = '4 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 16 + max_parallel_maintenance_workers = 16 + max_parallel_workers = 32 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = off + max_slot_wal_keep_size = 100GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 32000m + memory: 64Gi + hugepages-2Mi: 80Mi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 4Ti + +shmVolume: + sizeLimit: "32Gi" diff --git a/addons/postgresql-cluster/18/plans/standard-4c16g100/bind.yaml b/addons/postgresql-cluster/18/plans/standard-4c16g100/bind.yaml new file mode 100644 index 00000000..55955ed6 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-4c16g100/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-4c16g100/instance-schema.json b/addons/postgresql-cluster/18/plans/standard-4c16g100/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-4c16g100/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-4c16g100/meta.yaml b/addons/postgresql-cluster/18/plans/standard-4c16g100/meta.yaml new file mode 100644 index 00000000..20262416 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-4c16g100/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c16g100" +id: 5aea991e-3809-4c67-adf1-7b4ad04ce0f1 +description: "PostgreSQL Cluster standard-4c16g100 plan: Disk 100Gi ,vCPUs 4 , RAM 16G , DB MAX Connection 2000" +displayName: "standard-4c16g100" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/18/plans/standard-4c16g100/values.yaml b/addons/postgresql-cluster/18/plans/standard-4c16g100/values.yaml new file mode 100644 index 00000000..83ac2774 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-4c16g100/values.yaml @@ -0,0 +1,83 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-100 + +patroni: + pgParameters: + max_worker_processes: 8 + max_parallel_workers: 4 + max_connections: 2000 + max_slot_wal_keep_size: "10 GB" + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '4096 MB' + 
work_mem = '32 MB' + maintenance_work_mem = '320 MB' + huge_pages = off + effective_cache_size = '11 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '25 min' + checkpoint_completion_target = 0.9 + max_wal_size = '8 GB' + min_wal_size = '2 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 2 + max_parallel_maintenance_workers = 2 + max_parallel_workers = 4 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = off + max_slot_wal_keep_size = 10GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 4000m + hugepages-2Mi: 50Mi + memory: 16Gi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 100Gi + +shmVolume: + sizeLimit: "8Gi" diff --git a/addons/postgresql-cluster/18/plans/standard-8c32g200/bind.yaml b/addons/postgresql-cluster/18/plans/standard-8c32g200/bind.yaml new file mode 100644 index 00000000..55955ed6 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-8c32g200/bind.yaml @@ -0,0 +1,41 @@ +credential: +{{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTRANET_MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + - name: EXTRANET_REPL_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }}-repl + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' +{{- end }} + - name: DOMAIN_MASTER + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: DOMAIN_REPL + value: {{ template "common.names.fullname" . }}-repl.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: MASTER_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-master + jsonpath: '{ .spec.clusterIP }' + - name: REPL_HOST + valueFrom: + serviceRef: + name: {{ include "common.names.fullname" . }}-repl + jsonpath: '{ .spec.clusterIP }' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .data.admin-password }' + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .data.admin-user }' + - name: PORT + value: 5432 + - name: DATABASE + value: postgres \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-8c32g200/instance-schema.json b/addons/postgresql-cluster/18/plans/standard-8c32g200/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-8c32g200/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/postgresql-cluster/18/plans/standard-8c32g200/meta.yaml b/addons/postgresql-cluster/18/plans/standard-8c32g200/meta.yaml new file mode 100644 index 00000000..dd992c00 --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-8c32g200/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c32g200" +id: 3af96fe4-66d5-41f6-8efa-00209fbe4e31 +description: "PostgreSQL Cluster standard-8c32g200 plan: Disk 200Gi ,vCPUs 8 , RAM 32G , DB MAX Connection 2000" +displayName: "standard-8c32g200" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/postgresql-cluster/18/plans/standard-8c32g200/values.yaml b/addons/postgresql-cluster/18/plans/standard-8c32g200/values.yaml new file mode 100644 index 00000000..297ca6ae --- /dev/null +++ b/addons/postgresql-cluster/18/plans/standard-8c32g200/values.yaml @@ -0,0 +1,82 @@ +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: hb-postgresql-cluster-standard-200 + +patroni: + pgParameters: + max_worker_processes: 16 + max_parallel_workers: 8 + max_connections: 2000 + max_slot_wal_keep_size: "10 GB" + +postgresql: + config: |- + # Connectivity + superuser_reserved_connections = 3 + + # Memory Settings + shared_buffers = '8192 MB' + 
work_mem = '32 MB' + maintenance_work_mem = '420 MB' + huge_pages = try # NB! requires also activation of huge pages via kernel params, see here for more: https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES + effective_cache_size = '22 GB' + effective_io_concurrency = 100 # concurrent IO only really activated if OS supports posix_fadvise function + random_page_cost = 1.25 # speed of random disk access relative to sequential access (1.0) + + # Monitoring + track_io_timing=on # measure exact block IO times + track_functions=pl # track execution times of pl-language procedures if any + + # Replication + max_wal_senders = 10 + synchronous_commit = on + + # Checkpointing: + checkpoint_timeout = '25 min' + checkpoint_completion_target = 0.9 + max_wal_size = '16 GB' + min_wal_size = '3 GB' + + # WAL writing + wal_compression = on + wal_buffers = -1 # auto-tuned by Postgres till maximum of segment size (16MB by default) + wal_writer_delay = 200ms + wal_writer_flush_after = 1MB + + # Background writer + bgwriter_delay = 200ms + bgwriter_lru_maxpages = 100 + bgwriter_lru_multiplier = 2.0 + bgwriter_flush_after = 0 + + # Parallel queries: + max_parallel_workers_per_gather = 4 + max_parallel_maintenance_workers = 4 + max_parallel_workers = 8 + parallel_leader_participation = on + + # Advanced features + enable_partitionwise_join = on + enable_partitionwise_aggregate = on + jit = off + max_slot_wal_keep_size = 20GB + track_wal_io_timing = on + maintenance_io_concurrency = 100 + +resources: + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 8000m + hugepages-2Mi: 60Mi + memory: 32Gi + requests: + cpu: 100m + memory: 1Gi + +persistentVolume: + enabled: true + size: 200Gi + +shmVolume: + sizeLimit: "16Gi" From 1140a3097b8132f2553477eb7995d3be6c838307 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Wed, 18 Mar 2026 10:30:22 +0800 Subject: [PATCH 91/93] chore(kakfa): update plans broker config --- .../kafka/3.6/plans/standard-16c32g3w/values.yaml | 14 ++++++++++++++ .../kafka/3.6/plans/standard-1c2g3w/values.yaml | 15 +++++++++++++++ .../kafka/3.6/plans/standard-24c64g3w/values.yaml | 14 ++++++++++++++ .../kafka/3.6/plans/standard-2c4g3w/values.yaml | 14 ++++++++++++++ .../kafka/3.6/plans/standard-4c8g3w/values.yaml | 14 ++++++++++++++ .../kafka/3.6/plans/standard-8c16g3w/values.yaml | 14 ++++++++++++++ 6 files changed, 85 insertions(+) diff --git a/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml b/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml index d2f33263..26f404b5 100644 --- a/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-16c32g3w/values.yaml @@ -26,3 +26,17 @@ controller: ## @param controller.persistence.size PVC Storage Request for Kafka data volume ## size: 768Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.heapOpts Kafka Java Heap size for broker-only nodes + ## + heapOpts: -Xmx16G -Xms16G + resources: + limits: + cpu: 16 + memory: 32Gi + requests: + cpu: 2 + memory: 4Gi \ No newline at end of file diff --git a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml index 2fef60fe..aeee46f2 100644 --- a/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-1c2g3w/values.yaml @@ -26,3 +26,18 @@ controller: ## @param controller.persistence.size PVC Storage Request for Kafka data volume ## size: 16Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.heapOpts Kafka Java Heap size 
for broker-only nodes + ## + heapOpts: -Xmx1024m -Xms1024m + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + diff --git a/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml b/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml index 240dcbcf..eaef4da9 100644 --- a/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-24c64g3w/values.yaml @@ -26,3 +26,17 @@ controller: ## @param controller.persistence.size PVC Storage Request for Kafka data volume ## size: 1Ti + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.heapOpts Kafka Java Heap size for broker-only nodes + ## + heapOpts: -Xmx32G -Xms32G + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi \ No newline at end of file diff --git a/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml b/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml index f3255063..3a32b044 100644 --- a/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-2c4g3w/values.yaml @@ -26,3 +26,17 @@ controller: ## @param controller.persistence.size PVC Storage Request for Kafka data volume ## size: 64Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.heapOpts Kafka Java Heap size for broker-only nodes + ## + heapOpts: -Xmx2048m -Xms2048m + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi diff --git a/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml b/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml index 0c8f9cf3..7d1503d4 100644 --- a/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-4c8g3w/values.yaml @@ -26,3 +26,17 @@ controller: ## @param controller.persistence.size PVC Storage Request for Kafka data volume ## size: 256Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.heapOpts Kafka Java Heap size for broker-only nodes + ## + 
heapOpts: -Xmx4G -Xms4G + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi \ No newline at end of file diff --git a/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml b/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml index 64e7a47d..4f237ab6 100644 --- a/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml +++ b/addons/kafka/3.6/plans/standard-8c16g3w/values.yaml @@ -26,3 +26,17 @@ controller: ## @param controller.persistence.size PVC Storage Request for Kafka data volume ## size: 512Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.heapOpts Kafka Java Heap size for broker-only nodes + ## + heapOpts: -Xmx8G -Xms8G + resources: + limits: + cpu: 8 + memory: 16Gi + requests: + cpu: 1 + memory: 2Gi From 2b02ae2eab002df6641a4e3e8aadc74c32aace51 Mon Sep 17 00:00:00 2001 From: lijianguo Date: Thu, 19 Mar 2026 10:38:43 +0800 Subject: [PATCH 92/93] feat(kafka): add 4.2 version --- addons/index.yaml | 2 + addons/kafka/4.2/chart/kafka-4.2/Chart.yaml | 27 + addons/kafka/4.2/chart/kafka-4.2/README.md | 1564 +++++++++++ .../4.2/chart/kafka-4.2/templates/NOTES.txt | 336 +++ .../chart/kafka-4.2/templates/_helpers.tpl | 998 +++++++ .../kafka-4.2/templates/_init_containers.tpl | 513 ++++ .../templates/broker/config-secrets.yaml | 23 + .../kafka-4.2/templates/broker/configmap.yaml | 54 + .../chart/kafka-4.2/templates/broker/hpa.yaml | 43 + .../templates/broker/networkpolicy.yaml | 113 + .../chart/kafka-4.2/templates/broker/pdb.yaml | 30 + .../templates/broker/statefulset.yaml | 415 +++ .../templates/broker/svc-external-access.yaml | 75 + .../templates/broker/svc-headless.yaml | 45 + .../chart/kafka-4.2/templates/broker/vpa.yaml | 46 + .../chart/kafka-4.2/templates/ca-cert.yaml | 53 + .../4.2/chart/kafka-4.2/templates/cert.yaml | 56 + .../controller-eligible/config-secrets.yaml | 23 + .../controller-eligible/configmap.yaml | 56 + .../templates/controller-eligible/hpa.yaml | 43 + 
.../controller-eligible/networkpolicy.yaml | 119 + .../templates/controller-eligible/pdb.yaml | 30 + .../controller-eligible/statefulset.yaml | 426 +++ .../svc-external-access.yaml | 77 + .../controller-eligible/svc-headless.yaml | 48 + .../templates/controller-eligible/vpa.yaml | 45 + .../chart/kafka-4.2/templates/extra-list.yaml | 9 + .../kafka-4.2/templates/log4j2-configmap.yaml | 20 + .../templates/metrics/jmx-configmap.yaml | 70 + .../templates/metrics/jmx-servicemonitor.yaml | 49 + .../kafka-4.2/templates/metrics/jmx-svc.yaml | 38 + .../templates/metrics/prometheusrule.yaml | 20 + .../kafka-4.2/templates/provisioning/job.yaml | 348 +++ .../provisioning/serviceaccount.yaml | 17 + .../templates/provisioning/tls-secret.yaml | 21 + .../chart/kafka-4.2/templates/rbac/role.yaml | 26 + .../kafka-4.2/templates/rbac/rolebinding.yaml | 25 + .../templates/rbac/serviceaccount.yaml | 19 + .../chart/kafka-4.2/templates/secrets.yaml | 132 + .../4.2/chart/kafka-4.2/templates/svc.yaml | 76 + .../chart/kafka-4.2/templates/tls-secret.yaml | 65 + addons/kafka/4.2/chart/kafka-4.2/values.yaml | 2493 +++++++++++++++++ addons/kafka/4.2/meta.yaml | 99 + .../4.2/plans/standard-16c32g3w/bind.yaml | 109 + .../standard-16c32g3w/instance-schema.json | 12 + .../4.2/plans/standard-16c32g3w/meta.yaml | 6 + .../4.2/plans/standard-16c32g3w/values.yaml | 57 + .../kafka/4.2/plans/standard-1c2g3w/bind.yaml | 109 + .../standard-1c2g3w/instance-schema.json | 12 + .../kafka/4.2/plans/standard-1c2g3w/meta.yaml | 6 + .../4.2/plans/standard-1c2g3w/values.yaml | 57 + .../4.2/plans/standard-24c64g3w/bind.yaml | 109 + .../standard-24c64g3w/instance-schema.json | 12 + .../4.2/plans/standard-24c64g3w/meta.yaml | 6 + .../4.2/plans/standard-24c64g3w/values.yaml | 57 + .../kafka/4.2/plans/standard-2c4g3w/bind.yaml | 109 + .../standard-2c4g3w/instance-schema.json | 12 + .../kafka/4.2/plans/standard-2c4g3w/meta.yaml | 6 + .../4.2/plans/standard-2c4g3w/values.yaml | 57 + 
.../kafka/4.2/plans/standard-4c8g3w/bind.yaml | 109 + .../standard-4c8g3w/instance-schema.json | 12 + .../kafka/4.2/plans/standard-4c8g3w/meta.yaml | 6 + .../4.2/plans/standard-4c8g3w/values.yaml | 57 + .../4.2/plans/standard-8c16g3w/bind.yaml | 109 + .../standard-8c16g3w/instance-schema.json | 12 + .../4.2/plans/standard-8c16g3w/meta.yaml | 6 + .../4.2/plans/standard-8c16g3w/values.yaml | 59 + 67 files changed, 9893 insertions(+) create mode 100644 addons/kafka/4.2/chart/kafka-4.2/Chart.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/README.md create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/NOTES.txt create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/_helpers.tpl create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/_init_containers.tpl create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/config-secrets.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/configmap.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/hpa.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/networkpolicy.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/pdb.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/statefulset.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-external-access.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-headless.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/broker/vpa.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/ca-cert.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/cert.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/config-secrets.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/configmap.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/hpa.yaml create 
mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/networkpolicy.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/pdb.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/statefulset.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-external-access.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-headless.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/vpa.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/extra-list.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/log4j2-configmap.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-configmap.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-servicemonitor.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-svc.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/metrics/prometheusrule.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/job.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/serviceaccount.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/tls-secret.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/rbac/role.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/rbac/rolebinding.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/rbac/serviceaccount.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/secrets.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/svc.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/templates/tls-secret.yaml create mode 100644 addons/kafka/4.2/chart/kafka-4.2/values.yaml create mode 100644 addons/kafka/4.2/meta.yaml create mode 100644 
addons/kafka/4.2/plans/standard-16c32g3w/bind.yaml create mode 100644 addons/kafka/4.2/plans/standard-16c32g3w/instance-schema.json create mode 100644 addons/kafka/4.2/plans/standard-16c32g3w/meta.yaml create mode 100644 addons/kafka/4.2/plans/standard-16c32g3w/values.yaml create mode 100644 addons/kafka/4.2/plans/standard-1c2g3w/bind.yaml create mode 100644 addons/kafka/4.2/plans/standard-1c2g3w/instance-schema.json create mode 100644 addons/kafka/4.2/plans/standard-1c2g3w/meta.yaml create mode 100644 addons/kafka/4.2/plans/standard-1c2g3w/values.yaml create mode 100644 addons/kafka/4.2/plans/standard-24c64g3w/bind.yaml create mode 100644 addons/kafka/4.2/plans/standard-24c64g3w/instance-schema.json create mode 100644 addons/kafka/4.2/plans/standard-24c64g3w/meta.yaml create mode 100644 addons/kafka/4.2/plans/standard-24c64g3w/values.yaml create mode 100644 addons/kafka/4.2/plans/standard-2c4g3w/bind.yaml create mode 100644 addons/kafka/4.2/plans/standard-2c4g3w/instance-schema.json create mode 100644 addons/kafka/4.2/plans/standard-2c4g3w/meta.yaml create mode 100644 addons/kafka/4.2/plans/standard-2c4g3w/values.yaml create mode 100644 addons/kafka/4.2/plans/standard-4c8g3w/bind.yaml create mode 100644 addons/kafka/4.2/plans/standard-4c8g3w/instance-schema.json create mode 100644 addons/kafka/4.2/plans/standard-4c8g3w/meta.yaml create mode 100644 addons/kafka/4.2/plans/standard-4c8g3w/values.yaml create mode 100644 addons/kafka/4.2/plans/standard-8c16g3w/bind.yaml create mode 100644 addons/kafka/4.2/plans/standard-8c16g3w/instance-schema.json create mode 100644 addons/kafka/4.2/plans/standard-8c16g3w/meta.yaml create mode 100644 addons/kafka/4.2/plans/standard-8c16g3w/values.yaml diff --git a/addons/index.yaml b/addons/index.yaml index c5e13ead..46568398 100644 --- a/addons/index.yaml +++ b/addons/index.yaml @@ -63,6 +63,8 @@ entries: kafka: - version: 3.6 description: "Apache Kafka is an open-source distributed event streaming platform used by thousands of 
companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications." + - version: 4.2 + description: "Apache Kafka is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications." pmm: - version: 2.41 description: "Percona Monitoring and Management: an open source database monitoring, observability and management tool ." diff --git a/addons/kafka/4.2/chart/kafka-4.2/Chart.yaml b/addons/kafka/4.2/chart/kafka-4.2/Chart.yaml new file mode 100644 index 00000000..4ab73fdc --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/Chart.yaml @@ -0,0 +1,27 @@ +# Copyright Drycc Community. +# SPDX-License-Identifier: APACHE-2.0 + +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 4.2 +dependencies: + - name: common + repository: oci://registry.drycc.cc/charts + version: ~1.1.4 +description: Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications. +home: https://github.com/drycc-addons/addons/tree/main/addons/kafka/4.2 +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png +keywords: +- kafka +- streaming +- producer +- consumer +maintainers: + - name: Drycc Community. 
+ url: https://github.com/drycc-addons/addons +name: kafka +sources: +- https://github.com/drycc-addons/addons/tree/main/addons/kafka +version: 32.4.4 diff --git a/addons/kafka/4.2/chart/kafka-4.2/README.md b/addons/kafka/4.2/chart/kafka-4.2/README.md new file mode 100644 index 00000000..519d4e3a --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/README.md @@ -0,0 +1,1564 @@ + + +# Bitnami Secure Images Helm chart for Apache Kafka + +Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications. + +[Overview of Apache Kafka](http://kafka.apache.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/kafka +``` + +> Tip: Did you know that this app is also available as a Kubernetes App on the Azure Marketplace? Kubernetes Apps are the easiest way to deploy Bitnami on AKS. Click [here](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/bitnami.kafka-cnab) to see the listing on Azure Marketplace. + +## Why use Bitnami Secure Images? + +Those are hardened, minimal CVE images built and maintained by Bitnami. Bitnami Secure Images are based on the cloud-optimized, security-hardened enterprise [OS Photon Linux](https://vmware.github.io/photon/). Why choose BSI images? 
+ +- Hardened secure images of popular open source software with Near-Zero Vulnerabilities +- Vulnerability Triage & Prioritization with VEX Statements, KEV and EPSS Scores +- Compliance focus with FIPS, STIG, and air-gap options, including secure bill of materials (SBOM) +- Software supply chain provenance attestation through in-toto +- First class support for the internet’s favorite Helm charts + +Each image comes with valuable security metadata. You can view the metadata in [our public catalog here](https://app-catalog.vmware.com/bitnami/apps). Note: Some data is only available with [commercial subscriptions to BSI](https://bitnami.com/). + +![Alt text](https://github.com/bitnami/containers/blob/main/BSI%20UI%201.png?raw=true "Application details") +![Alt text](https://github.com/bitnami/containers/blob/main/BSI%20UI%202.png?raw=true "Packaging report") + +If you are looking for our previous generation of images based on Debian Linux, please see the [Bitnami Legacy registry](https://hub.docker.com/u/bitnamilegacy). + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/kafka +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. 
The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Configuration and installation details + +### Listeners configuration + +This chart allows you to automatically configure Kafka with 4 listeners: + +- One for controller communications. +- A second one for inter-broker communications. +- A third one for communications with clients within the K8s cluster. +- (optional) A fourth listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-cluster) for more information. + +For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. + +### Enable security for Kafka + +You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for controller and inter-broker communications. This table shows the available protocols and the security they provide: + +| Method | Authentication | Encryption via TLS | +|-----------|------------------------------|--------------------| +| plaintext | None | No | +| tls | None | Yes | +| mtls | Yes (two-way authentication) | Yes | +| sasl | Yes (via SASL) | No | +| sasl_tls | Yes (via SASL) | Yes | + +Configure the authentication protocols for client, controller and inter-broker communications by setting the `listeners.client.protocol`, `listeners.controller.protocol` and `listeners.interbroker.protocol` parameters to the desired ones, respectively. + +If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below: + +- `sasl.client.users`/`sasl.client.passwords`: when enabling SASL authentication for communications with clients. 
+- `sasl.interbroker.user`/`sasl.interbroker.password`: when enabling SASL authentication for inter-broker communications.
+- `sasl.controller.user`/`sasl.controller.password`: when enabling SASL authentication for controller communications.
+
+In order to configure TLS authentication/encryption, you **can** create a secret per Kafka node you have in the cluster containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and the keystore (`kafka.keystore.jks`). Then, you need to pass the secret names with the `tls.existingSecret` parameter when deploying the chart.
+
+> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `tls.keystorePassword` and `tls.truststorePassword` parameters to provide your passwords.
+
+For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka nodes, use the commands below to create the secrets:
+
+```console
+kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks
+kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks
+```
+
+> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the generation of the JKS files.
+
+If, for some reason (like using CertManager), you cannot use the default JKS secret scheme, you can use the additional parameters:
+
+- `tls.jksTruststoreSecret` to define an additional secret, where the `kafka.truststore.jks` is being kept. The truststore password **must** be the same as in `tls.truststorePassword`
+- `tls.jksTruststoreKey` to overwrite the default value of the truststore key (`kafka.truststore.jks`). 
+
+> **Note**: If you are using CertManager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that CertManager creates. To handle this, the `tls.pemChainIncluded` property can be set to `true` and the initContainer created by this Chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain).
+> **Note**: The truststore/keystore from above **must** be protected with the same passwords set in the `tls.keystorePassword` and `tls.truststorePassword` parameters.
+
+You can deploy the chart with authentication using the following parameters:
+
+```console
+replicaCount=2
+listeners.client.protocol=SASL
+listeners.interbroker.protocol=TLS
+tls.existingSecret=kafka-jks
+tls.keystorePassword=jksPassword
+tls.truststorePassword=jksPassword
+sasl.client.users[0]=brokerUser
+sasl.client.passwords[0]=brokerPassword
+
+```
+
+By setting the following parameters: `listeners.client.protocol=SSL` and `listeners.client.sslClientAuth=required`, Kafka will require the clients to authenticate to Kafka brokers via certificate.
+
+As a result, we will be able to see events with the specific Subject in `kafka-authorizer.log`: `[...] Principal = User:CN=kafka,OU=...,O=...,L=...,C=..,ST=... is [...]`.
+
+### Update credentials
+
+The Bitnami Kafka chart, when upgrading, reuses the secret previously rendered by the chart or the one specified in `sasl.existingSecret`. To update credentials, use one of the following:
+
+- Run `helm upgrade` specifying new credentials in the `sasl` section as explained in the [authentication section](#enable-security-for-kafka).
+- Run `helm upgrade` specifying a new secret in `sasl.existingSecret`.
+
+### Accessing Kafka brokers from outside the cluster
+
+In order to access Kafka Brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per kafka pod will be created. 
+ +There are three ways of configuring external access. Using LoadBalancer services, using NodePort services or using ClusterIP services. + +#### Using LoadBalancer services + +You have two alternatives to use LoadBalancer services: + +- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discover them automatically. + +```console +externalAccess.enabled=true +externalAccess.broker.service.type=LoadBalancer +externalAccess.controller.service.type=LoadBalancer +externalAccess.broker.service.ports.external=9094 +externalAccess.controller.service.ports.external=9094 +defaultInitContainers.autoDiscovery.enabled=true +serviceAccount.create=true +broker.automountServiceAccountToken=true +controller.automountServiceAccountToken=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the load balancer IPs: + +```console +externalAccess.enabled=true +externalAccess.controller.service.type=LoadBalancer +externalAccess.controller.service.containerPorts.external=9094 +externalAccess.controller.service.loadBalancerIPs[0]='external-ip-1' +externalAccess.controller.service.loadBalancerIPs[1]='external-ip-2' +externalAccess.broker.service.type=LoadBalancer +externalAccess.broker.service.ports.external=9094 +externalAccess.broker.service.loadBalancerIPs[0]='external-ip-3' +externalAccess.broker.service.loadBalancerIPs[1]='external-ip-4' +``` + +Note: You need to know in advance the load balancer IPs so each Kafka broker advertised listener is configured with it. + +Following the aforementioned steps will also allow to connect the brokers from the outside using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections. 
+
+#### Using NodePort services
+
+You have three alternatives to use NodePort services:
+
+- Option A) Use random node ports using an **initContainer** that discovers them automatically.
+
+  ```console
+  externalAccess.enabled=true
+  externalAccess.controller.service.type=NodePort
+  externalAccess.broker.service.type=NodePort
+  defaultInitContainers.autoDiscovery.enabled=true
+  serviceAccount.create=true
+  rbac.create=true
+  ```
+
+  Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the node ports:
+
+  ```console
+  externalAccess.enabled=true
+  externalAccess.controller.service.type=NodePort
+  externalAccess.controller.service.nodePorts[0]='node-port-1'
+  externalAccess.controller.service.nodePorts[1]='node-port-2'
+  ```
+
+  Note: You need to know in advance the node ports that will be exposed so each Kafka broker advertised listener is configured with it.
+
+  The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.<controller|broker>.service.domain` or `externalAccess.<controller|broker>.service.useHostIPs` is provided.
+
+- Option C) Manually specify distinct external IPs (using controller+broker nodes)
+
+  ```console
+  externalAccess.enabled=true
+  externalAccess.controller.service.type=NodePort
+  externalAccess.controller.service.externalIPs[0]='172.16.0.20'
+  externalAccess.controller.service.externalIPs[1]='172.16.0.21'
+  externalAccess.controller.service.externalIPs[2]='172.16.0.22'
+  ```
+
+  Note: You need to know in advance the available IPs of your cluster that will be exposed so each Kafka broker advertised listener is configured with it. 
+ +#### Using ClusterIP services + +Note: This option requires that an ingress is deployed within your cluster + +```console +externalAccess.enabled=true +externalAccess.controller.service.type=ClusterIP +externalAccess.controller.service.ports.external=9094 +externalAccess.controller.service.domain='ingress-ip' +externalAccess.broker.service.type=ClusterIP +externalAccess.broker.service.ports.external=9094 +externalAccess.broker.service.domain='ingress-ip' +``` + +Note: the deployed ingress must contain the following block: + +```console +tcp: + 9094: "{{ include "common.names.namespace" . }}/{{ include "common.names.fullname" . }}-0-external:9094" + 9095: "{{ include "common.names.namespace" . }}/{{ include "common.names.fullname" . }}-1-external:9094" + 9096: "{{ include "common.names.namespace" . }}/{{ include "common.names.fullname" . }}-2-external:9094" +``` + +#### Name resolution with External-DNS + +You can use the following values to generate External-DNS annotations which automatically creates DNS records for each ReplicaSet pod: + +```yaml +externalAccess: + controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com" +``` + +### Resource requests and limits + +Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` values (check parameters table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. 
Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+### Prometheus metrics
+
+#### Enable metrics
+
+This chart can be integrated with Prometheus by setting `metrics.jmx.enabled` to `true`. This will deploy a sidecar container with [jmx_exporter](https://github.com/prometheus/jmx_exporter) in all pods and a `metrics` service, which can be configured under the `metrics.jmx.service` section. This service will have the necessary annotations to be automatically scraped by Prometheus.
+
+#### Prometheus requirements
+
+It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster.
+
+#### Integration with Prometheus Operator
+
+The chart can deploy `ServiceMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `metrics.serviceMonitor.enabled=true`. Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error:
+
+```text
+no matches for kind "ServiceMonitor" in version "monitoring.coreos.com/v1"
+```
+
+Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) for having the necessary CRDs and the Prometheus Operator.
+
+### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html)
+
+It is strongly recommended to use immutable tags in a production environment. 
This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Sidecars + +If you have a need for additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` parameters. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such as Kafka Connect. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB®: + +```yaml +extraDeploy: + - | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "common.names.fullname" . 
}}-connect + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector + spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: connector + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: connector + spec: + containers: + - name: connect + image: KAFKA-CONNECT-IMAGE + imagePullPolicy: IfNotPresent + ports: + - name: connector + containerPort: 8083 + volumeMounts: + - name: configuration + mountPath: /bitnami/kafka/config + volumes: + - name: configuration + configMap: + name: {{ include "common.names.fullname" . }}-connect + - | + apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ include "common.names.fullname" . }}-connect + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector + data: + connect-standalone.properties: |- + bootstrap.servers = {{ include "common.names.fullname" . }}-controller-0.{{ include "common.names.fullname" . }}-controller-headless.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.ports.client }} + ... + mongodb.properties: |- + connection.uri=mongodb://root:password@mongodb-hostname:27017 + ... + - | + apiVersion: v1 + kind: Service + metadata: + name: {{ include "common.names.fullname" . 
}}-connect + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector + spec: + ports: + - protocol: TCP + port: 8083 + targetPort: connector + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: connector +``` + +You can create the Kafka Connect image using the Dockerfile below: + +```Dockerfile +FROM bitnami/kafka:latest +# Download MongoDB® Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +RUN mkdir -p /opt/bitnami/kafka/plugins && \ + cd /opt/bitnami/kafka/plugins && \ + curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar +CMD /opt/bitnami/kafka/bin/connect-standalone.sh /bitnami/kafka/config/connect-standalone.properties /bitnami/kafka/config/mongo.properties +``` + +### Persistence + +The [Bitnami Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. + +#### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. 
+ +#### Backup and restore + +To back up and restore Helm chart deployments on Kubernetes, you need to back up the persistent volumes from the source deployment and attach them to a new deployment using [Velero](https://velero.io/), a Kubernetes backup/restore tool. Find the instructions for using Velero in [this guide](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-backup-restore-deployments-velero-index.html). + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.security.allowInsecureImages` | Allows skipping image verification | `false` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | + +### Common parameters + +| Name | Description | Value | +| ------------------------- | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `apiVersions` | Override Kubernetes API versions reported by .Capabilities | `[]` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `usePasswordFiles` | Mount credentials as files instead of using environment variables | `true` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the chart release | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the chart release | `["infinity"]` | +| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | + +### Kafka common parameters + +| Name | Description | Value | +| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | +| `image.registry` | Kafka image registry | `REGISTRY_NAME` | 
+| `image.repository` | Kafka image repository | `REPOSITORY_NAME/kafka` | +| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `clusterId` | Kafka Kraft cluster ID (ignored if existingKraftSecret is set). A random cluster ID will be generated the 1st time Kraft is initialized if not set. | `""` | +| `existingKraftSecret` | Name of the secret containing the Kafka KRaft Cluster ID and one directory ID per controller replica | `""` | +| `kraftVersion` | Kraft version to be used. It determines whether static quorum (kraftVersion=0) or dynamic quorum (kraftVersion=1) will be used. | `1` | +| `config` | Specify content for Kafka configuration (auto-generated based on other parameters otherwise) | `{}` | +| `overrideConfiguration` | Kafka common configuration override. 
Values defined here takes precedence over the ones defined at `config` | `{}` | +| `existingConfigmap` | Name of an existing ConfigMap with the Kafka configuration | `""` | +| `secretConfig` | Additional configuration to be appended at the end of the generated Kafka configuration (store in a secret) | `""` | +| `existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka configuration | `""` | +| `log4j2` | Specify content for Kafka log4j2 configuration (default one is used otherwise) | `""` | +| `existingLog4j2ConfigMap` | The name of an existing ConfigMap containing the log4j2.yaml file | `""` | +| `heapOpts` | Kafka Java Heap configuration | `-XX:InitialRAMPercentage=75 -XX:MaxRAMPercentage=75` | +| `brokerRackAwareness.enabled` | Enable Kafka Rack Awareness | `false` | +| `brokerRackAwareness.cloudProvider` | Cloud provider to use to set Broker Rack Awareness. Allowed values: `aws-az`, `azure` | `""` | +| `brokerRackAwareness.azureApiVersion` | Metadata API version to use when brokerRackAwareness.cloudProvider is set to `azure` | `2023-11-15` | +| `interBrokerProtocolVersion` | Override the setting 'inter.broker.protocol.version' during the ZK migration. | `""` | +| `listeners.client.name` | Name for the Kafka client listener | `CLIENT` | +| `listeners.client.containerPort` | Port for the Kafka client listener | `9092` | +| `listeners.client.protocol` | Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.client.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. 
Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.controller.name` | Name for the Kafka controller listener | `CONTROLLER` | +| `listeners.controller.containerPort` | Port for the Kafka controller listener | `9093` | +| `listeners.controller.protocol` | Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.controller.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.interbroker.name` | Name for the Kafka inter-broker listener | `INTERNAL` | +| `listeners.interbroker.containerPort` | Port for the Kafka inter-broker listener | `9094` | +| `listeners.interbroker.protocol` | Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.interbroker.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.external.containerPort` | Port for the Kafka external listener | `9095` | +| `listeners.external.protocol` | Security protocol for the Kafka external listener. . Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.external.name` | Name for the Kafka external listener | `EXTERNAL` | +| `listeners.external.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.sslClientAuth for this listener. 
Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.extraListeners` | Array of listener objects to be appended to already existing listeners | `[]` | +| `listeners.overrideListeners` | Overrides the Kafka 'listeners' configuration setting. | `""` | +| `listeners.advertisedListeners` | Overrides the Kafka 'advertised.listener' configuration setting. | `""` | +| `listeners.securityProtocolMap` | Overrides the Kafka 'security.protocol.map' configuration setting. | `""` | + +### Kafka SASL parameters + +| Name | Description | Value | +| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `sasl.enabledMechanisms` | Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER` | `PLAIN,SCRAM-SHA-256,SCRAM-SHA-512` | +| `sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `PLAIN` | +| `sasl.controllerMechanism` | SASL mechanism for controller communications. | `PLAIN` | +| `sasl.oauthbearer.tokenEndpointUrl` | The URL for the OAuth/OIDC identity provider | `""` | +| `sasl.oauthbearer.jwksEndpointUrl` | The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved | `""` | +| `sasl.oauthbearer.expectedAudience` | The comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences | `""` | +| `sasl.oauthbearer.subClaimName` | The OAuth claim name for the subject. | `sub` | +| `sasl.interbroker.user` | Username for inter-broker communications when SASL is enabled | `inter_broker_user` | +| `sasl.interbroker.password` | Password for inter-broker communications when SASL is enabled. 
If not set and SASL is enabled for the controller listener, a random password will be generated. | `""` | +| `sasl.interbroker.clientId` | Client ID for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER | `inter_broker_client` | +| `sasl.interbroker.clientSecret` | Client Secret for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the controller listener, a random secret will be generated. | `""` | +| `sasl.controller.user` | Username for controller communications when SASL is enabled | `controller_user` | +| `sasl.controller.password` | Password for controller communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. | `""` | +| `sasl.controller.clientId` | Client ID for controller communications when SASL is enabled with mechanism OAUTHBEARER | `controller_broker_client` | +| `sasl.controller.clientSecret` | Client Secret for controller communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the inter-broker listener, a random secret will be generated. 
| `""` | +| `sasl.client.users` | Comma-separated list of usernames for client communications when SASL is enabled | `["user1"]` | +| `sasl.client.passwords` | Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users | `""` | +| `sasl.existingSecret` | Name of the existing secret containing credentials for client.users, interbroker.user and controller.user | `""` | + +### Kafka TLS parameters + +| Name | Description | Value | +| ------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM` | `JKS` | +| `tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` | +| `tls.autoGenerated.enabled` | Enable automatic generation of TLS certificates (only supported if `tls.type` is `PEM`) | `true` | +| `tls.autoGenerated.engine` | Mechanism to generate the certificates (allowed values: helm, cert-manager) | `helm` | +| `tls.autoGenerated.customAltNames` | List of additional subject alternative names (SANs) for the automatically generated TLS certificates. 
| `[]` | +| `tls.autoGenerated.certManager.existingIssuer` | The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine) | `""` | +| `tls.autoGenerated.certManager.existingIssuerKind` | Existing Issuer kind, defaults to Issuer (only for `cert-manager` engine) | `""` | +| `tls.autoGenerated.certManager.keyAlgorithm` | Key algorithm for the certificates (only for `cert-manager` engine) | `RSA` | +| `tls.autoGenerated.certManager.keySize` | Key size for the certificates (only for `cert-manager` engine) | `2048` | +| `tls.autoGenerated.certManager.duration` | Duration for the certificates (only for `cert-manager` engine) | `2160h` | +| `tls.autoGenerated.certManager.renewBefore` | Renewal period for the certificates (only for `cert-manager` engine) | `360h` | +| `tls.existingSecret` | Name of the existing secret containing the TLS certificates for the Kafka nodes. | `""` | +| `tls.passwordsSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) | `""` | +| `tls.passwordsSecretKeystoreKey` | The secret key from the tls.passwordsSecret containing the password for the Keystore. | `keystore-password` | +| `tls.passwordsSecretTruststoreKey` | The secret key from the tls.passwordsSecret containing the password for the Truststore. | `truststore-password` | +| `tls.passwordsSecretPemPasswordKey` | The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'. | `""` | +| `tls.keystorePassword` | Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `tls.truststorePassword` | Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `tls.keyPassword` | Password to access the PEM key when it is password-protected. 
| `""` | +| `tls.jksKeystoreKey` | The secret key from the `tls.existingSecret` containing the keystore | `""` | +| `tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the one in the `tls.existingSecret` | `""` | +| `tls.jksTruststoreKey` | The secret key from the `tls.existingSecret` or `tls.jksTruststoreSecret` containing the truststore | `""` | +| `tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `tls.sslClientAuth` | Sets the default value for the ssl.client.auth Kafka setting. | `required` | +| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | +| `dnsPolicy` | Specifies the DNS policy for the Kafka pods | `""` | +| `dnsConfig` | allows users more control on the DNS settings for a Pod. 
Required if `dnsPolicy` is set to `None` | `{}` | +| `defaultInitContainers.volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `defaultInitContainers.volumePermissions.image.registry` | "volume-permissions" init-containers' image registry | `REGISTRY_NAME` | +| `defaultInitContainers.volumePermissions.image.repository` | "volume-permissions" init-containers' image repository | `REPOSITORY_NAME/os-shell` | +| `defaultInitContainers.volumePermissions.image.digest` | "volume-permissions" init-containers' image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `defaultInitContainers.volumePermissions.image.pullPolicy` | "volume-permissions" init-containers' image pull policy | `IfNotPresent` | +| `defaultInitContainers.volumePermissions.image.pullSecrets` | "volume-permissions" init-containers' image pull secrets | `[]` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.enabled` | Enabled "volume-permissions" init-containers' Security Context | `true` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in "volume-permissions" init-containers | `{}` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser` | Set runAsUser in "volume-permissions" init-containers' Security Context | `0` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.privileged` | Set privileged in "volume-permissions" init-containers' Security Context | `false` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "volume-permissions" init-containers' Security Context | `false` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.add` | List of capabilities to be added in "volume-permissions" init-containers | `[]` | +| 
`defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "volume-permissions" init-containers | `["ALL"]` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "volume-permissions" init-containers | `RuntimeDefault` | +| `defaultInitContainers.volumePermissions.resourcesPreset` | Set Kafka "volume-permissions" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.volumePermissions.resources is set (defaultInitContainers.volumePermissions.resources is recommended for production). | `nano` | +| `defaultInitContainers.volumePermissions.resources` | Set Kafka "volume-permissions" init container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.enabled` | Enabled "prepare-config" init-containers' Security Context | `true` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.seLinuxOptions` | Set SELinux options in "prepare-config" init-containers | `{}` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.runAsUser` | Set runAsUser in "prepare-config" init-containers' Security Context | `1001` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.runAsGroup` | Set runAsGroup in "prepare-config" init-containers' Security Context | `1001` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "prepare-config" init-containers' Security Context | `true` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "prepare-config" init-containers' Security Context | `true` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.privileged` | Set privileged 
in "prepare-config" init-containers' Security Context | `false` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "prepare-config" init-containers' Security Context | `false` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.add` | List of capabilities to be added in "prepare-config" init-containers | `[]` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "prepare-config" init-containers | `["ALL"]` | +| `defaultInitContainers.prepareConfig.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "prepare-config" init-containers | `RuntimeDefault` | +| `defaultInitContainers.prepareConfig.resourcesPreset` | Set Kafka "prepare-config" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.prepareConfig.resources is set (defaultInitContainers.prepareConfig.resources is recommended for production). | `nano` | +| `defaultInitContainers.prepareConfig.resources` | Set Kafka "prepare-config" init container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `defaultInitContainers.prepareConfig.extraInit` | Additional content for the "prepare-config" init script, rendered as a template. 
| `""` | +| `defaultInitContainers.autoDiscovery.enabled` | Enable init container that auto-detects external IPs/ports by querying the K8s API | `false` | +| `defaultInitContainers.autoDiscovery.image.registry` | "auto-discovery" init-containers' image registry | `REGISTRY_NAME` | +| `defaultInitContainers.autoDiscovery.image.repository` | "auto-discovery" init-containers' image repository | `REPOSITORY_NAME/os-shell` | +| `defaultInitContainers.autoDiscovery.image.digest` | "auto-discovery" init-containers' image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `defaultInitContainers.autoDiscovery.image.pullPolicy` | "auto-discovery" init-containers' image pull policy | `IfNotPresent` | +| `defaultInitContainers.autoDiscovery.image.pullSecrets` | "auto-discovery" init-containers' image pull secrets | `[]` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.enabled` | Enabled "auto-discovery" init-containers' Security Context | `true` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.seLinuxOptions` | Set SELinux options in "auto-discovery" init-containers | `{}` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.runAsUser` | Set runAsUser in "auto-discovery" init-containers' Security Context | `1001` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.runAsGroup` | Set runAsGroup in "auto-discovery" init-containers' Security Context | `1001` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot in "auto-discovery" init-containers' Security Context | `true` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem` | Set readOnlyRootFilesystem in "auto-discovery" init-containers' Security Context | `true` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.privileged` | Set privileged in "auto-discovery" init-containers' Security Context | `false` | +| 
`defaultInitContainers.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "auto-discovery" init-containers' Security Context | `false` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.capabilities.add` | List of capabilities to be added in "auto-discovery" init-containers | `[]` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "auto-discovery" init-containers | `["ALL"]` | +| `defaultInitContainers.autoDiscovery.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "auto-discovery" init-containers | `RuntimeDefault` | +| `defaultInitContainers.autoDiscovery.resourcesPreset` | Set Kafka "auto-discovery" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.autoDiscovery.resources is set (defaultInitContainers.autoDiscovery.resources is recommended for production). | `nano` | +| `defaultInitContainers.autoDiscovery.resources` | Set Kafka "auto-discovery" init container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | + +### Controller-eligible statefulset parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | +| `controller.replicaCount` | Number of Kafka controller-eligible nodes | `3` | +| `controller.controllerOnly` | If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. 
| `false` | +| `controller.quorumBootstrapServers` | Override the Kafka controller quorum bootstrap servers of the Kafka Kraft cluster. If not set, it will be automatically configured to use all controller-eligible nodes. | `""` | +| `controller.minId` | Minimal node.id values for controller-eligible nodes. Do not change after first initialization. | `0` | +| `controller.config` | Specify content for Kafka configuration for Kafka controller-eligible nodes (auto-generated based on other parameters otherwise) | `{}` | +| `controller.overrideConfiguration` | Kafka configuration override for Kafka controller-eligible nodes. Values defined here takes precedence over the ones defined at `controller.config` | `{}` | +| `controller.existingConfigmap` | Name of an existing ConfigMap with the Kafka configuration for Kafka controller-eligible nodes | `""` | +| `controller.secretConfig` | Additional configuration to be appended at the end of the generated Kafka configuration for Kafka controller-eligible nodes (store in a secret) | `""` | +| `controller.existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka configuration for Kafka controller-eligible nodes | `""` | +| `controller.heapOpts` | Kafka Java Heap configuration for controller-eligible nodes | `-XX:InitialRAMPercentage=75 -XX:MaxRAMPercentage=75` | +| `controller.command` | Override Kafka container command | `[]` | +| `controller.args` | Override Kafka container arguments | `[]` | +| `controller.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `controller.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `controller.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `controller.extraContainerPorts` | Kafka controller-eligible extra containerPorts. 
| `[]` | +| `controller.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `controller.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `controller.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `controller.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `controller.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `controller.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `controller.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `controller.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `controller.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `controller.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `controller.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `controller.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `controller.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `controller.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `controller.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `controller.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `controller.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `controller.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `controller.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `controller.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `controller.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| 
`controller.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `controller.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). | `small` | +| `controller.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `controller.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `controller.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `controller.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `controller.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `controller.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `controller.podSecurityContext.seccompProfile.type` | Set Kafka pods's Security Context seccomp profile | `RuntimeDefault` | +| `controller.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| `controller.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `controller.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `controller.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `controller.containerSecurityContext.runAsGroup` | Set Kafka containers' Security Context runAsGroup | `1001` | +| `controller.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `controller.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | +| 
`controller.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | +| `controller.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | +| `controller.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `controller.hostAliases` | Kafka pods host aliases | `[]` | +| `controller.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `controller.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `controller.podLabels` | Extra labels for Kafka pods | `{}` | +| `controller.podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `controller.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `controller.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `controller.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `controller.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `controller.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `controller.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `controller.affinity` | Affinity for pod assignment | `{}` | +| `controller.nodeSelector` | Node labels for pod assignment | `{}` | +| `controller.tolerations` | Tolerations for pod assignment | `[]` | +| `controller.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `controller.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `controller.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `controller.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | +| `controller.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `controller.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `controller.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `controller.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `controller.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `controller.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `controller.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `controller.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `controller.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` | + +### Experimental: Kafka Controller Autoscaling configuration + +| Name | Description | Value | +| ------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `controller.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `controller.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| 
`controller.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory | `[]` | +| `controller.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `controller.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `controller.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `controller.autoscaling.hpa.enabled` | Enable HPA for Kafka Controller | `false` | +| `controller.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `controller.autoscaling.hpa.minReplicas` | Minimum number of Kafka Controller replicas | `""` | +| `controller.autoscaling.hpa.maxReplicas` | Maximum number of Kafka Controller replicas | `""` | +| `controller.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `controller.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `controller.pdb.create` | Deploy a pdb object for the Kafka pod | `true` | +| `controller.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` | +| `controller.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | +| `controller.persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` | +| `controller.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `controller.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | +| `controller.persistence.enabled` | Enable Kafka data persistence using PVC | `true` | +| `controller.persistence.existingClaim` | A 
manually managed Persistent Volume and Claim | `""` | +| `controller.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `controller.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `controller.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `controller.persistence.annotations` | Annotations for the PVC | `{}` | +| `controller.persistence.labels` | Labels for the PVC | `{}` | +| `controller.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `controller.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `controller.logPersistence.enabled` | Enable Kafka logs persistence using PVC | `false` | +| `controller.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `controller.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `controller.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `controller.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `controller.logPersistence.annotations` | Annotations for the PVC | `{}` | +| `controller.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `controller.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + +### Broker-only statefulset parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | +| `broker.replicaCount` | Number of Kafka broker-only nodes | `0` | +| `broker.minId` | Minimal node.id values for broker-only nodes. Do not change after first initialization. | `100` | +| `broker.config` | Specify content for Kafka configuration for Kafka broker-only nodes (auto-generated based on other parameters otherwise) | `{}` | +| `broker.overrideConfiguration` | Kafka configuration override for Kafka broker-only nodes. 
Values defined here takes precedence over the ones defined at `broker.config` | `{}` | +| `broker.existingConfigmap` | Name of an existing ConfigMap with the Kafka configuration for Kafka broker-only nodes | `""` | +| `broker.secretConfig` | Additional configuration to be appended at the end of the generated Kafka configuration for Kafka broker-only nodes (store in a secret) | `""` | +| `broker.existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka configuration for Kafka broker-only nodes | `""` | +| `broker.heapOpts` | Kafka Java Heap configuration for broker-only nodes | `-XX:InitialRAMPercentage=75 -XX:MaxRAMPercentage=75` | +| `broker.command` | Override Kafka container command | `[]` | +| `broker.args` | Override Kafka container arguments | `[]` | +| `broker.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `broker.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `broker.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `broker.extraContainerPorts` | Kafka broker-only extra containerPorts. 
| `[]` | +| `broker.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `broker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `broker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `broker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `broker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `broker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `broker.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `broker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `broker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `broker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `broker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `broker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `broker.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `broker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `broker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `broker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `broker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `broker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `broker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `broker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `broker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `broker.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or 
after startup | `{}` | +| `broker.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production). | `small` | +| `broker.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `broker.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `broker.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `broker.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `broker.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `broker.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `broker.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` | +| `broker.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| `broker.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `broker.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `broker.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `broker.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `broker.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | +| `broker.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | +| `broker.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | +| `broker.automountServiceAccountToken` | Mount 
Service Account token in pod | `false` | +| `broker.hostAliases` | Kafka pods host aliases | `[]` | +| `broker.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `broker.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `broker.podLabels` | Extra labels for Kafka pods | `{}` | +| `broker.podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `broker.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `broker.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `broker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `broker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `broker.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `broker.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `broker.affinity` | Affinity for pod assignment | `{}` | +| `broker.nodeSelector` | Node labels for pod assignment | `{}` | +| `broker.tolerations` | Tolerations for pod assignment | `[]` | +| `broker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `broker.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `broker.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `broker.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | +| `broker.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `broker.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `broker.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `broker.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `broker.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `broker.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `broker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `broker.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `broker.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` | +| `broker.pdb.create` | Deploy a pdb object for the Kafka pod | `true` | +| `broker.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` | +| `broker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | + +### Experimental: Kafka Broker Autoscaling configuration + +| Name | Description | Value | +| --------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `broker.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `broker.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `broker.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `broker.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `broker.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `broker.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `broker.autoscaling.hpa.enabled` | Enable HPA for Kafka Broker | `false` | +| `broker.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `broker.autoscaling.hpa.minReplicas` | Minimum number of Kafka Broker replicas | `""` | +| `broker.autoscaling.hpa.maxReplicas` | Maximum number of Kafka Broker replicas | `""` | +| `broker.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `broker.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | +| `broker.persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` | +| `broker.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `broker.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | +| `broker.persistence.enabled` | Enable Kafka data persistence using PVC | `true` | +| `broker.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `broker.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `broker.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `broker.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `broker.persistence.annotations` | Annotations for the PVC | `{}` | +| `broker.persistence.labels` | Labels for the PVC | `{}` | +| 
`broker.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `broker.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `broker.logPersistence.enabled` | Enable Kafka logs persistence using PVC | `false` | +| `broker.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `broker.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `broker.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `broker.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `broker.logPersistence.annotations` | Annotations for the PVC | `{}` | +| `broker.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `broker.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + +### Traffic Exposure parameters + +| Name | Description | Value | +| ----------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.controller` | Kafka svc port for controller connections | `9093` | +| `service.ports.interbroker` | Kafka svc port for inter-broker connections | `9094` | +| `service.ports.external` | Kafka svc port for external connections | `9095` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| 
`service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerClass` | Kafka service Load Balancer Class | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.headless.controller.annotations` | Annotations for the controller-eligible headless service. | `{}` | +| `service.headless.controller.labels` | Labels for the controller-eligible headless service. | `{}` | +| `service.headless.broker.annotations` | Annotations for the broker-only headless service. | `{}` | +| `service.headless.broker.labels` | Labels for the broker-only headless service. | `{}` | +| `service.headless.ipFamilies` | IP families for the headless service | `[]` | +| `service.headless.ipFamilyPolicy` | IP family policy for the headless service | `""` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes | `false` | +| `externalAccess.controller.service.type` | Kubernetes Service type for external access. 
It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.controller.service.loadBalancerClass` | Kubernetes Service Load Balancer class for external access when service type is LoadBalancer | `""` | +| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.controller.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.controller.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.controller.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | +| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `externalAccess.controller.service.ipFamilies` | IP families for the external controller service | `[]` | +| `externalAccess.controller.service.ipFamilyPolicy` | IP family policy for the external controller service | `""` | +| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.broker.service.loadBalancerClass` | Kubernetes Service Load Balancer class for external access when service type is LoadBalancer | `""` | +| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. 
Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.broker.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.broker.service.usePodIPs` | using the MY_POD_IP address for external access. | `false` | +| `externalAccess.broker.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.broker.service.labels` | Service labels for external access | `{}` | +| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `externalAccess.broker.service.ipFamilies` | IP families for the external broker service | `[]` | +| `externalAccess.broker.service.ipFamilyPolicy` | IP family policy for the external broker service | `""` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.allowExternalEgress` | 
Allow the pod to access any range of port and all destinations. | `true` | +| `networkPolicy.addExternalClientAccess` | Allow access from pods with client label set to "true". Ignored if `networkPolicy.allowExternal` is true. | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressPodMatchLabels` | Labels to match to allow traffic from other pods. Ignored if `networkPolicy.allowExternal` is true. | `{}` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. | `{}` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + +### Metrics parameters + +| Name | Description | Value | +| --------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.kafkaJmxPort` | JMX port where the exporter will collect metrics, exposed in the Kafka container. | `5555` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation | `false` | +| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem | `true` | +| `metrics.jmx.containerSecurityContext.capabilities.drop` | Set Prometheus JMX exporter containers' Security Context capabilities to be dropped | `["ALL"]` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). 
| `micro` | +| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.jmx.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.jmx.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` | +| `metrics.jmx.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.jmx.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `metrics.jmx.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.jmx.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.jmx.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.jmx.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `metrics.jmx.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.jmx.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` | +| `metrics.jmx.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.jmx.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | +| `metrics.jmx.service.ipFamilies` | IP families for the jmx metrics service | `[]` | +| `metrics.jmx.service.ipFamilyPolicy` | IP family policy for the jmx metrics service | `""` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | 
`["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.path` | Path where JMX exporter serves metrics | `/metrics` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | + +### Kafka provisioning parameters + +| Name | Description | Value | +| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `provisioning.enabled` | Enable Kafka provisioning Job | `false` | +| `provisioning.waitForKafka` | Whether an init container should be created to wait until Kafka is ready before provisioning | `true` | +| `provisioning.useHelmHooks` | Flag to indicate usage of helm hooks | `true` | +| `provisioning.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| `provisioning.topics` | Kafka topics to provision | `[]` | +| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | +| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | +| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | +| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | +| `provisioning.preScript` | Extra bash script to run before topic provisioning. 
/shared/client.properties is path to properties file with most needed configurations | `""` | +| `provisioning.postScript` | Extra bash script to run after topic provisioning. /shared/client.properties is path to properties file with most needed configurations | `""` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. | `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. 
| `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `true` | +| `provisioning.serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `provisioning.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production). | `micro` | +| `provisioning.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `provisioning.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `provisioning.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.podSecurityContext.seccompProfile.type` | Set Kafka provisioning pod's Security Context seccomp profile | `RuntimeDefault` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `provisioning.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka provisioning containers' Security Context 
allowPrivilegeEscalation | `false` | +| `provisioning.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka provisioning containers' Security Context readOnlyRootFilesystem | `true` | +| `provisioning.containerSecurityContext.capabilities.drop` | Set Kafka provisioning containers' Security Context capabilities to be dropped | `["ALL"]` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | + +```console +helm install my-release \ + --set controller.replicaCount=3 \ + oci://REGISTRY_NAME/REPOSITORY_NAME/kafka +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command deploys Kafka with 3 Kafka controller-eligible nodes. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/kafka +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. 
For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/kafka/values.yaml) + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 32.3.0 + +We have introduced the value `kraftVersion` to help control the change from static quorum to dynamic quorum. + +By default, new clusters will be deployed with the new dynamic quorum (kraftVersion=1), but users upgrading from Kafka 3.x may need to modify the default values to continue using static quorum (kraftVersion=0). +That is because Kafka 4.0 does not yet support switching from static quorum (controller.quorum.voters) to dynamic quorum (controller.quorum.bootstrap.servers), causing upgrades to fail (#34015). + +For more information please check [Kafka documentation](https://kafka.apache.org/documentation/#static_versus_dynamic_kraft_quorums). + +### To 32.0.0 + +This major release bumps Kafka major version to `4.y.z` series. This version implies a significant milestone given now Kafka operates entirely without Apache ZooKeeper, running in KRaft mode by default. As a consequence, **ZooKeeper is no longer a chart dependency and every related parameter has been removed.** Upgrading from `31.y.z` chart version is not supported unless KRaft mode was already enabled. + +Also, some KRaft-related parameters have been renamed or removed: + +- `kraft.enabled` has been removed. Kafka now operates in KRaft mode by default. +- `kraft.controllerQuorumVoters` has been renamed to `controller.quorumVoters`. +- `kraft.clusterId` and `kraft.existingClusterIdSecret` have been renamed to `clusterId` and `existingKraftSecret`, respectively. 
+ +Other notable changes: + +- `log4j` and `existingLog4jConfig` parameters have been renamed to `log4j2` and `existingLog4j2ConfigMap`, respectively. +- `controller.quorumVoters` has been removed in favor of `controller.quorumBootstrapServers`. +- `brokerRackAssignment` and `brokerRackAssignmentApiVersion` are deprecated in favor of `brokerRackAwareness.*` parameters. +- `tls.autoGenerated` boolean is now an object with extended configuration options. +- `volumePermissions` parameters have been moved under `defaultInitContainers` parameter. +- `externalAccess.autoDiscovery` parameters have been moved under `defaultInitContainers` parameter. +- `controller.initContainerResources` and `broker.initContainerResources` have been removed. Use `defaultInitContainers.prepareConfig.resources` instead. +- `extraInit` has been renamed to `defaultInitContainers.prepareConfig.extraInit`. +- `extraConfig` has been replaced with `overrideConfiguration`. +- provisioning variable `$CLIENT_CONF` has been removed; use `/shared/client.properties`. +- If using your own SSL certs you will need to disable `tls.autoGenerated.enabled` + +### To 31.1.0 + +This version introduces image verification for security purposes. To disable it, set `global.security.allowInsecureImages` to `true`. More details at [GitHub issue](https://github.com/bitnami/charts/issues/30850). + +### To 31.0.0 + +This major release bumps the Kafka version to 3.9. Find notable changes in [kafka upgrade notes](https://kafka.apache.org/39/documentation.html#upgrade). + +### To 30.0.0 + +This major release bumps the Kafka version to 3.8. Find notable changes in [kafka upgrade notes](https://kafka.apache.org/38/documentation.html#upgrade). + +### To 29.0.0 + +This major version of Kafka deprecates Kafka Exporter component. 
+ +### To 28.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. +- The `networkPolicy` section has been normalized amongst all Bitnami charts. Compared to the previous approach, the values section has been simplified (check the Parameters section) and now it is set to `enabled=true` by default. Egress traffic is allowed by default and ingress traffic is allowed by all pods but only to the ports set in `containerPorts` and `extraContainerPorts`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + +### To 26.0.0 + +This major release bumps the Kafka version to 3.6 [kafka upgrade notes](https://kafka.apache.org/36/documentation.html#upgrade). + +### To 25.0.0 + +This major updates the Zookeeper subchart to its newest major, 12.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1200). + +### To 24.0.0 + +This major version is a refactor of the Kafka chart and its architecture, to better adapt to Kraft features introduced in version 22.0.0. + +The changes introduced in this version are: + +- New architecture. The chart now has two statefulsets, one for controller-eligible nodes (controller or controller+broker) and another one for broker-only nodes. 
Please take a look at the subsections [Upgrading from Kraft mode](#upgrading-from-kraft-mode) and [Upgrading from Zookeeper mode](#upgrading-from-zookeeper-mode) for more information about how to upgrade this chart depending on which mode you were using. + + The new architecture is designed to support two main features: + - Deployment of dedicated nodes + - Support for Zookeeper to Kraft migration + +- Adds compatibility with `securityContext.readOnlyRootFs=true`, which is now the execution default. + - The Kafka configuration is now mounted as a ConfigMap instead of generated at runtime. + - Due to the implementation of readOnlyRootFs support, the following settings have been removed and will now rely on Kafka defaults. To override them, please use `extraConfig` to extend your Kafka configuration instead. + - `deleteTopicEnable` + - `autoCreateTopicsEnable` + - `logFlushIntervalMessages` + - `logFlushIntervalMs` + - `logRetentionBytes` + - `logRetentionCheckIntervalMs` + - `logRetentionHours` + - `logSegmentBytes` + - `logsDirs` + - `maxMessageBytes` + - `defaultReplicationFactor` + - `offsetsTopicReplicationFactor` + - `transactionStateLogReplicationFactor` + - `transactionStateLogMinIsr` + - `numIoThreads` + - `numNetworkThreads` + - `numPartitions` + - `numRecoveryThreadsPerDataDir` + - `socketReceiveBufferBytes` + - `socketRequestMaxBytes` + - `socketSendBufferBytes` + - `zookeeperConnectionTimeoutMs` + - `authorizerClassName` + - `allowEveryoneIfNoAclFound` + - `superUsers` +- All listeners are configured with protocol 'SASL_PLAINTEXT' by default. +- Support for SCRAM authentication in KRaft mode +- All statefulset settings have been moved from values' root to `controller.*` and `broker.*`. 
+- Refactor of listeners configuration: + - Settings `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` have been replaced with `listeners.*` object, which includes default listeners and each listener can be configured individually and extended using `listeners.extraListeners`. + - Values `interBrokerListenerName`, `allowPlaintextListener` have been removed. +- Refactor of SASL, SSL and ACL settings: + - Authentication nomenclature `plaintext,tls,mtls,sasl,sasl_tls` has been removed. Listeners are now configured using Kafka nomenclature `PLAINTEXT,SASL_PLAINTEXT,SASL_SSL,SSL` in `listeners.*.protocol`. + - mTLS is configured by default for SSL protocol listeners, while it can now also be configured for SASL_SSL listeners if `listener.*.sslClientAuth` is set. + - All SASL settings are now grouped under `sasl.*`. + - `auth.sasl.mechanisms` -> `sasl.enabledMechanisms` + - `auth.interBrokerMechanism` -> `sasl.interBrokerMechanism` + - `auth.sasl.jaas.clientUSers` -> `sasl.client.users` + - `auth.sasl.jaas.clientPasswords` -> `sasl.client.passwords` + - `auth.sasl.jaas.interBrokerUser` -> `sasl.interbroker.user` + - `auth.sasl.jaas.interBrokerPassword` -> `sasl.interbroker.password` + - `auth.sasl.jaas.zookeeperUser` -> `sasl.zookeeper.user` + - `auth.sasl.jaas.zookeeperPassword` -> `sasl.zookeeper.password` + - `auth.sasl.jaas.existingSecret` -> `sasl.existingSecret` + - Added support for Controller listener protocols other than PLAINTEXT. + - TLS settings have been moved from `auth.tls.*` to `tls.*`. + - Zookeeper TLS settings have been moved from `auth.zookeeper*` to `tls.zookeeper.*` +- Refactor externalAccess to support the new architecture: + - `externalAccess.service.*` have been renamed to `externalAccess.controller.service.*` and `externalAccess.broker.service.*`. 
+ - Controller pods will not configure externalAccess unless either: + - `controller.controllerOnly=false` (default), meaning the pods are running as 'controller+broker' nodes; or + - `externalAccess.controller.service.forceExpose=true`, for use cases where controller-only nodes want to be exposed externally. +- TLS certificates value `tls.existingSecret` no longer supports an array of secrets (1 secret per node). It now accepts a single secret containing multiple certificates named `kafka--` for each Kafka pod, or alternatively, a single certificate shared by all Kafka nodes using wildcard CN and/or SubjectAltNames. **NOTE**: If using CertManager to automatically generate the certificate secrets, only the single certificate approach would be supported. + +#### Upgrading from Kraft mode + +If upgrading from Kraft mode, existing PVCs from Kafka containers should be reattached to 'controller' pods. + +#### Upgrading from Zookeeper mode + +If upgrading from Zookeeper mode, make sure you set 'controller.replicaCount=0' and reattach the existing PVCs to 'broker' pods. +This will allow you to perform a migration to Kraft mode in the future by following the following section. + +##### Migrating from Zookeeper (Early access) + +This guide is an adaptation from upstream documentation: [Migrate from ZooKeeper to KRaft](https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html) + +1. Retrieve the cluster ID from Zookeeper: + + ```console + $ kubectl exec -it -- zkCli.sh get /cluster/id + /opt/bitnami/java/bin/java + Connecting to localhost:2181 + + WATCHER:: + + WatchedEvent state:SyncConnected type:None path:null + {"version":"1","id":"TEr3HVPvTqSWixWRHngP5g"} + ``` + +2. Deploy at least one Kraft controller-only in your deployment and enable `zookeeperMigrationMode=true`. The Kraft controllers will migrate the data from your Kafka ZkBroker to Kraft mode. 
+ + To do so add the following values to your Zookeeper deployment when upgrading: + + ```yaml + controller: + replicaCount: 1 + controllerOnly: true + zookeeperMigrationMode: true + # If needed, set controllers minID to avoid conflict with your ZK brokers' ids. + # minID: 0 + broker: + zookeeperMigrationMode: true + kraft: + enabled: true + clusterId: "" + ``` + +3. Wait until until all brokers are ready. You should see the following log in the broker logs: + + ```console + INFO [KafkaServer id=100] Finished catching up on KRaft metadata log, requesting that the KRaft controller unfence this broker (kafka.server.KafkaServer) + INFO [BrokerLifecycleManager id=100 isZkBroker=true] The broker has been unfenced. Transitioning from RECOVERY to RUNNING. (kafka.server.BrokerLifecycleManager) + ``` + + In the controllers, the following message should show up: + + ```console + Transitioning ZK migration state from PRE_MIGRATION to MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` + +4. Once all brokers have been successfully migrated, set `broker.zookeeperMigrationMode=false` to fully migrate them. + + ```yaml + broker: + zookeeperMigrationMode: false + ``` + +5. To conclude the migration, switch off migration mode on controllers and stop Zookeeper: + + ```yaml + controller: + zookeeperMigrationMode: false + zookeeper: + enabled: false + ``` + + After migration is complete, you should see the following message in your controllers: + + ```console + [2023-07-13 13:07:45,226] INFO [QuorumController id=1] Transitioning ZK migration state from MIGRATION to POST_MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` + +6. (**Optional**) If you would like to switch to a non-dedicated cluster, set `controller.controllerOnly=false`. This will cause controller-only nodes to switch to controller+broker nodes. + + At that point, you could manually decommission broker-only nodes by reassigning its partitions to controller-eligible nodes. 
+ + For more information about decommissioning kafka broker check the [Kafka documentation](https://www.confluent.io/blog/remove-kafka-brokers-from-any-cluster-the-easy-way/). + +#### Retaining PersistentVolumes + +When upgrading the Kafka chart, you may want to retain your existing data. To do so, we recommend following this guide: + +**NOTE**: This guide requires the binaries 'kubectl' and 'jq'. + +```console +# Env variables +REPLICA=0 +OLD_PVC="data--kafka-${REPLICA}" +NEW_PVC="data--kafka--${REPLICA}" +PV_NAME=$(kubectl get pvc $OLD_PVC -o jsonpath="{.spec.volumeName}") +NEW_PVC_MANIFEST_FILE="$NEW_PVC.yaml" + +# Modify PV reclaim policy +kubectl patch pv $PV_NAME -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' +# Manually check field 'RECLAIM POLICY' +kubectl get pv $PV_NAME + +# Create new PVC manifest +kubectl get pvc $OLD_PVC -o json | jq " + .metadata.name = \"$NEW_PVC\" + | with_entries( + select([.key] | + inside([\"metadata\", \"spec\", \"apiVersion\", \"kind\"])) + ) + | del( + .metadata.annotations, .metadata.creationTimestamp, + .metadata.finalizers, .metadata.resourceVersion, + .metadata.selfLink, .metadata.uid + ) + " > $NEW_PVC_MANIFEST_FILE +# Check manifest +cat $NEW_PVC_MANIFEST_FILE + +# Delete your old Statefulset and PVC +kubectl delete sts "-kafka" +kubectl delete pvc $OLD_PVC +# Make PV available again and create the new PVC +kubectl patch pv $PV_NAME -p '{"spec":{"claimRef": null}}' +kubectl apply -f $NEW_PVC_MANIFEST_FILE +``` + +Repeat this process for each replica you had in your Kafka cluster. Once completed, upgrade the cluster and the new Statefulset should reuse the existing PVCs. + +### To 23.0.0 + +This major updates Kafka to its newest version, 3.5.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/35/documentation.html#upgrade). + +### To 22.0.0 + +This major updates the Kafka's configuration to use Kraft by default. 
You can learn more about this configuration [here](https://developer.confluent.io/learn/kraft). Apart from setting the `kraft.enabled` parameter to `true`, we also made the following changes:
+
+- Renamed `minBrokerId` parameter to `minId` to set the minimum ID to use when configuring the node.id or broker.id parameter depending on the Kafka's configuration. This parameter sets the `KAFKA_CFG_NODE_ID` env var in the container.
+- Updated the `containerPorts` and `service.ports` parameters to include the new controller port.
+
+### To 21.0.0
+
+This major updates Kafka to its newest version, 3.4.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/34/documentation.html#upgrade).
+
+### To 20.0.0
+
+This major updates the Zookeeper subchart to its newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100).
+
+### To 19.0.0
+
+This major updates Kafka to its newest version, 3.3.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/33/documentation.html#upgrade).
+
+### To 18.0.0
+
+This major updates the Zookeeper subchart to its newest major, 10.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1000).
+
+### To 16.0.0
+
+This major updates the Zookeeper subchart to its newest major, 9.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-900).
+
+### To 15.0.0
+
+This major release bumps Kafka major version to `3.x` series.
+It also renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Bitnami charts repository.
Some affected values are: + +- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map. +- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map. +- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map. +- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map. +- Several parameters marked as deprecated `14.x.x` are not supported anymore. + +Additionally updates the ZooKeeper subchart to it newest major, `8.0.0`, which contains similar changes. + +### To 14.0.0 + +In this version, the `image` block is defined once and is used in the different templates, while in the previous version, the `image` block was duplicated for the main container and the provisioning one + +```yaml +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +``` + +VS + +```yaml +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +... +provisioning: + image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +``` + +See [PR#7114](https://github.com/bitnami/charts/pull/7114) for more info about the implemented changes + +### To 13.0.0 + +This major updates the Zookeeper subchart to it newest major, 7.0.0, which renames all TLS-related settings. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-700). + +### To 12.2.0 + +This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. 
+ +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Move dependency information from the *requirements.yaml* to the *Chart.yaml* +- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock* +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +### To 11.8.0 + +External access to brokers can now be achieved through the cluster's Kafka service. + +- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external` + +### To 11.7.0 + +The way to configure the users and passwords changed. 
Now it is allowed to create multiple users during the installation by providing the list of users and passwords.
+
+- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array).
+- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array).
+
+### To 11.0.0
+
+The way to configure listeners and authentication on Kafka is totally refactored allowing users to configure different authentication protocols on different listeners. Please check the [Listeners Configuration](#listeners-configuration) section for more information.
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones on this major version:
+
+- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.certificatesSecret` -> renamed to `auth.jksSecret`.
+- `auth.certificatesPassword` -> renamed to `auth.jksPassword`.
+- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`.
+- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser`
+- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword`
+- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser`
+- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword`
+- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret`
+- `service.sslPort` -> deprecated in favor of `service.internalPort`
+- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort`
+- `metrics.kafka.extraFlag` -> new parameter
+- `metrics.kafka.certificatesSecret` -> new parameter
+
+### To 10.0.0
+
+If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later.
+
+### To 9.0.0
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed on this major version:
+
+```diff
+- securityContext.enabled
+- securityContext.fsGroup
+- securityContext.fsGroup
++ podSecurityContext
+- externalAccess.service.loadBalancerIP
++ externalAccess.service.loadBalancerIPs
+- externalAccess.service.nodePort
++ externalAccess.service.nodePorts
+- metrics.jmx.configMap.enabled
+- metrics.jmx.configMap.overrideConfig
++ metrics.jmx.config
+- metrics.jmx.configMap.overrideName
++ metrics.jmx.existingConfigmap
+```
+
+Port names were prefixed with the protocol to comply with Istio (see ).
+
+### To 8.0.0
+
+There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
+ +### To 7.0.0 + +Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments. +Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka: + +```console +helm upgrade kafka oci://REGISTRY_NAME/REPOSITORY_NAME/kafka --version 6.1.8 --set metrics.kafka.enabled=false +helm upgrade kafka oci://REGISTRY_NAME/REPOSITORY_NAME/kafka --version 7.0.0 --set metrics.kafka.enabled=true +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` + +## License + +Copyright © 2025 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/NOTES.txt b/addons/kafka/4.2/chart/kafka-4.2/templates/NOTES.txt new file mode 100644 index 00000000..dd897e58 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/NOTES.txt @@ -0,0 +1,336 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/drycc/scripts/kafka/entrypoint.sh /opt/drycc/scripts/kafka/run.sh + +{{- else }} + +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . 
}} +{{- $clientPort := int .Values.listeners.client.containerPort }} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq (upper .Values.listeners.external.protocol) "PLAINTEXT") }} +--------------------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not configuring the authentication + you have most likely exposed the Kafka service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also configure the Kafka authentication. + +--------------------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +Kafka can be accessed by consumers via port {{ $clientPort }} on the following DNS name from within your cluster: + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + +Each Kafka broker can be accessed by producers via port {{ $clientPort }} on the following DNS name(s) from within your cluster: + +{{- $brokerList := list }} +{{- if not .Values.controller.controllerOnly }} +{{- range $i := until (int .Values.controller.replicaCount) }} +{{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $clientPort) }} +{{- end }} +{{- end }} +{{- range $i := until (int .Values.broker.replicaCount) }} +{{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $clientPort) }} +{{- end }} +{{ join "\n" $brokerList | nindent 4 }} +{{- $clientSaslEnabled := regexFind "SASL" (upper .Values.listeners.client.protocol) }} +{{- $clientSslEnabled := regexFind "SSL" (upper .Values.listeners.client.protocol) }} +{{- $clientMTlsEnabled := or (and 
.Values.listeners.client.sslClientAuth (not (eq .Values.listeners.client.sslClientAuth "none"))) (and (empty .Values.listeners.client.sslClientAuth) (not (eq .Values.tls.sslClientAuth "none"))) }} +{{- if or $clientSaslEnabled $clientSslEnabled }} + +The {{ upper .Values.listeners.client.name }} listener for Kafka client connections from within your cluster have been configured with the following security settings: + {{- if $clientSaslEnabled }} + - SASL authentication + {{- end }} + {{- if $clientSslEnabled }} + - TLS encryption + {{- end }} + {{- if and $clientSslEnabled $clientMTlsEnabled }} + - mTLS authentication + {{- end }} + +To connect a client to your Kafka, you need to create the 'client.properties' configuration files with the content below: + +security.protocol={{ .Values.listeners.client.protocol }} +{{- if $clientSaslEnabled }} +{{- if regexFind "OAUTHBEARER" (upper .Values.sasl.enabledMechanisms ) }} +sasl.jaas.config="org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required"\ + clientId="" \ + password=""; +sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler +sasl.oauthbearer.token.endpoint.url={{ .Values.sasl.oauthbearer.tokenEndpointUrl }} +{{- else }} +{{- if regexFind "SCRAM-SHA-256" (upper .Values.sasl.enabledMechanisms) }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if regexFind "SCRAM-SHA-512" (upper .Values.sasl.enabledMechanisms) }} +sasl.mechanism=SCRAM-SHA-512 +{{- else if regexFind "PLAIN" (upper .Values.sasl.enabledMechanisms) }} +sasl.mechanism=PLAIN +{{- end }} +{{- $securityModule := ternary "org.apache.kafka.common.security.scram.ScramLoginModule required" "org.apache.kafka.common.security.plain.PlainLoginModule required" (regexMatch "SCRAM" (upper .Values.sasl.enabledMechanisms)) }} +sasl.jaas.config={{ $securityModule }} \ + username="{{ index .Values.sasl.client.users 0 }}" \ + password="$(kubectl get secret {{ $fullname }}-user-passwords 
--namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; +{{- end }} +{{- end }} +{{- if $clientSslEnabled }} +{{- $clientTlsType := upper .Values.tls.type }} +ssl.truststore.type={{ $clientTlsType }} +{{- if eq $clientTlsType "JKS" }} +ssl.truststore.location=/tmp/kafka.truststore.jks +# Uncomment this line if your client truststore is password protected +#ssl.truststore.password= +{{- else if eq $clientTlsType "PEM" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if and $clientMTlsEnabled }} +ssl.keystore.type={{ $clientTlsType }} +{{- if eq $clientTlsType "JKS" }} +ssl.keystore.location=/tmp/client.keystore.jks +# Uncomment this line if your client truststore is password protected +#ssl.keystore.password= +{{- else if eq $clientTlsType "PEM" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... \ +-----END ENCRYPTED PRIVATE KEY----- +{{- end }} +{{- end }} +{{- if eq .Values.tls.endpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} +{{- end }} +{{- end }} + +To create a pod that you can use as a Kafka client run the following commands: + + kubectl run {{ $fullname }}-client --restart='Never' --image {{ template "kafka.image" . 
}} --namespace {{ $releaseNamespace }} --command -- sleep infinity + {{- if or $clientSaslEnabled $clientSslEnabled }} + kubectl cp --namespace {{ $releaseNamespace }} /path/to/client.properties {{ $fullname }}-client:/tmp/client.properties + {{- end }} + {{- if and $clientSslEnabled (eq (upper .Values.tls.type) "JKS") }} + kubectl cp --namespace {{ $releaseNamespace }} ./kafka.truststore.jks {{ $fullname }}-client:/tmp/kafka.truststore.jks + {{- if $clientMTlsEnabled }} + kubectl cp --namespace {{ $releaseNamespace }} ./client.keystore.jks {{ $fullname }}-client:/tmp/client.keystore.jks + {{- end }} + {{- end }} + kubectl exec --tty -i {{ $fullname }}-client --namespace {{ $releaseNamespace }} -- bash + + PRODUCER: + kafka-console-producer.sh \ + {{- if or $clientSaslEnabled $clientSslEnabled }} + --command-config--producer.config /tmp/client.properties \ + {{- end }} + --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \ + --topic test + + CONSUMER: + kafka-console-consumer.sh \ + {{- if or $clientSaslEnabled $clientSslEnabled }} + --command-config /tmp/client.properties \ + {{- end }} + --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \ + --topic test \ + --from-beginning + +{{- if .Values.externalAccess.enabled }} +{{- if or (not .Values.controller.controllerOnly) .Values.externalAccess.controller.forceExpose }} + +{{- if and .Values.controller.controllerOnly .Values.externalAccess.controller.forceExpose }} +To connect to your Kafka controller-only nodes from outside the cluster, follow these instructions: +{{- else }} +To connect to your Kafka controller+broker nodes from outside the cluster, follow these instructions: +{{- end }} + +{{- if eq "NodePort" .Values.externalAccess.controller.service.type }} + {{- if .Values.externalAccess.controller.service.domain }} + Kafka brokers domain: Use your provided hostname to reach 
Kafka brokers, {{ .Values.externalAccess.controller.service.domain }} + + {{- else }} + Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener) + + 1. Obtain the pod name: + + kubectl get pods --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka" + + 2. Obtain pod configuration: + + kubectl exec -it KAFKA_POD -- cat /opt/drycc/kafka/config/server.properties | grep advertised.listeners + + {{- end }} + Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if eq "LoadBalancer" .Values.externalAccess.controller.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + + Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w' + + Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . 
}} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + Kafka Brokers port: {{ .Values.externalAccess.controller.service.ports.external }} + +{{- else if eq "ClusterIP" .Values.externalAccess.controller.service.type }} + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.controller.service.domain }} + + Kafka brokers port: You will have a different port for each Kafka broker starting at {{ .Values.externalAccess.controller.service.ports.external }} + +{{- end }} +{{- end }} + +{{- $brokerReplicaCount := int .Values.broker.replicaCount -}} +{{- if gt $brokerReplicaCount 0 }} +To connect to your Kafka broker nodes from outside the cluster, follow these instructions: + +{{- if eq "NodePort" .Values.externalAccess.broker.service.type }} + {{- if .Values.externalAccess.broker.service.domain }} + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.broker.service.domain }} + + {{- else }} + Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener) + + 1. Obtain the pod name: + + kubectl get pods --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka" + + 2. Obtain pod configuration: + + kubectl exec -it KAFKA_POD -- cat /opt/drycc/kafka/config/server.properties | grep advertised.listeners + + {{- end }} + Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . 
}} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if eq "LoadBalancer" .Values.externalAccess.broker.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + + Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w' + + Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ include "common.names.namespace" . }} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + Kafka Brokers port: {{ .Values.externalAccess.broker.service.ports.external }} + +{{- else if eq "ClusterIP" .Values.externalAccess.broker.service.type }} + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.broker.service.domain }} + + Kafka brokers port: You will have a different port for each Kafka broker starting at {{ .Values.externalAccess.broker.service.ports.external }} + +{{- end }} +{{- end }} +{{- if or $clientSaslEnabled $clientSslEnabled }} +{{- $externalSaslEnabled := regexFind "SASL" (upper .Values.listeners.external.protocol) }} +{{- $externalSslEnabled := regexFind "SSL" (upper .Values.listeners.external.protocol) }} +{{- $externalMTlsEnabled := or (and .Values.listeners.external.sslClientAuth (not (eq .Values.listeners.external.sslClientAuth "none"))) (and (empty .Values.listeners.external.sslClientAuth) (not (eq .Values.tls.sslClientAuth "none"))) }} + +The {{ upper .Values.listeners.external.name }} listener for Kafka client connections from within your cluster have been configured with the following settings: + 
{{- if $externalSaslEnabled }} + - SASL authentication + {{- end }} + {{- if $externalSslEnabled }} + - TLS encryption + {{- end }} + {{- if and $externalSslEnabled $externalMTlsEnabled }} + - mTLS authentication + {{- end }} + +To connect a client to your Kafka, you need to create the 'client.properties' configuration files with the content below: + +security.protocol={{ .Values.listeners.external.protocol }} +{{- if $externalSaslEnabled }} +{{- if regexFind "OAUTHBEARER" (upper .Values.sasl.enabledMechanisms ) }} +sasl.jaas.config="org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required"\ + clientId="" \ + password=""; +sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler +sasl.oauthbearer.token.endpoint.url={{ .Values.sasl.oauthbearer.tokenEndpointUrl }} +{{- else }} +{{- if regexFind "SCRAM-SHA-256" (upper .Values.sasl.enabledMechanisms) }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if regexFind "SCRAM-SHA-512" (upper .Values.sasl.enabledMechanisms) }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- $securityModule := ternary "org.apache.kafka.common.security.scram.ScramLoginModule required" "org.apache.kafka.common.security.plain.PlainLoginModule required" (regexMatch "SCRAM" (upper .Values.sasl.enabledMechanisms)) }} +sasl.jaas.config={{ $securityModule }} \ + username="{{ index .Values.sasl.client.users 0 }}" \ + password="$(kubectl get secret {{ $fullname }}-user-passwords --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; +{{- end }} +{{- end }} +{{- if $externalSslEnabled }} +{{- $clientTlsType := upper .Values.tls.type }} +ssl.truststore.type={{ $clientTlsType }} +{{- if eq $clientTlsType "JKS" }} +ssl.truststore.location=/tmp/kafka.truststore.jks +# Uncomment this line if your client truststore is password protected +#ssl.truststore.password= +{{- else if eq $clientTlsType "PEM" }} 
+ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+{{- end }}
+{{- if and $externalMTlsEnabled }}
+ssl.keystore.type={{ $clientTlsType }}
+{{- if eq $clientTlsType "JKS" }}
+ssl.keystore.location=/tmp/client.keystore.jks
+# Uncomment this line if your client keystore is password protected
+#ssl.keystore.password=
+{{- else if eq $clientTlsType "PEM" }}
+ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \
+... \
+-----END ENCRYPTED PRIVATE KEY-----
+{{- end }}
+{{- end }}
+{{- if eq .Values.tls.endpointIdentificationAlgorithm "" }}
+ssl.endpoint.identification.algorithm=
+{{- end }}
+{{- end }}
+
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- include "common.warnings.resources" (dict "sections" (list "broker" "controller" "metrics.jmx" "provisioning" "defaultInitContainers.volumePermissions" "defaultInitContainers.prepareConfig" "defaultInitContainers.autoDiscovery") "context" $) }}
+{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.defaultInitContainers.volumePermissions.image .Values.defaultInitContainers.autoDiscovery.image .Values.metrics.jmx.image) "context" $) }}
+{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.defaultInitContainers.volumePermissions.image .Values.defaultInitContainers.autoDiscovery.image .Values.metrics.jmx.image) "context" $) }}
+{{- include "kafka.checkRollingTags" . }}
+{{- include "kafka.validateValues" . }}
diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/_helpers.tpl b/addons/kafka/4.2/chart/kafka-4.2/templates/_helpers.tpl
new file mode 100644
index 00000000..f17c9a85
--- /dev/null
+++ b/addons/kafka/4.2/chart/kafka-4.2/templates/_helpers.tpl
@@ -0,0 +1,998 @@
+{{/*
+Copyright Drycc Community.
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper Kafka controller-eligible fullname +*/}} +{{- define "kafka.controller.fullname" -}} +{{- printf "%s-controller" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Kafka broker fullname +*/}} +{{- define "kafka.broker.fullname" -}} +{{- printf "%s-broker" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "kafka.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Kafka image name +*/}} +{{- define "kafka.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "kafka.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.defaultInitContainers.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container auto-discovery image) +*/}} +{{- define "kafka.autoDiscovery.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.defaultInitContainers.autoDiscovery.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper JMX exporter image name +*/}} +{{- define "kafka.metrics.jmx.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.jmx.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "kafka.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image 
.Values.defaultInitContainers.volumePermissions.image .Values.defaultInitContainers.autoDiscovery.image .Values.metrics.jmx.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return true if encryption via TLS for client connections should be configured +*/}} +{{- define "kafka.sslEnabled" -}} +{{- $res := "" -}} +{{- $listeners := list .Values.listeners.client .Values.listeners.interbroker .Values.listeners.controller -}} +{{- range $i := .Values.listeners.extraListeners -}} +{{- $listeners = append $listeners $i -}} +{{- end -}} +{{- if and .Values.externalAccess.enabled -}} +{{- $listeners = append $listeners .Values.listeners.external -}} +{{- end -}} +{{- range $listener := $listeners -}} +{{- if regexFind "SSL" (upper $listener.protocol) -}} +{{- $res = "true" -}} +{{- end -}} +{{- end -}} +{{- if $res -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if SASL connections should be configured +*/}} +{{- define "kafka.saslEnabled" -}} +{{- $res := "" -}} +{{- if include "kafka.client.saslEnabled" . 
-}} +{{- $res = "true" -}} +{{- else -}} +{{- $listeners := list .Values.listeners.interbroker .Values.listeners.controller -}} +{{- range $listener := $listeners -}} +{{- if regexFind "SASL" (upper $listener.protocol) -}} +{{- $res = "true" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- if $res -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if SASL connections should be configured +*/}} +{{- define "kafka.client.saslEnabled" -}} +{{- $res := "" -}} +{{- $listeners := list .Values.listeners.client -}} +{{- range $i := .Values.listeners.extraListeners -}} +{{- $listeners = append $listeners $i -}} +{{- end -}} +{{- if and .Values.externalAccess.enabled -}} +{{- $listeners = append $listeners .Values.listeners.external -}} +{{- end -}} +{{- range $listener := $listeners -}} +{{- if regexFind "SASL" (upper $listener.protocol) -}} +{{- $res = "true" -}} +{{- end -}} +{{- end -}} +{{- if $res -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if a SASL mechanism that uses usernames and passwords is in use +*/}} +{{- define "kafka.saslUserPasswordsEnabled" -}} +{{- if (include "kafka.saslEnabled" .) -}} +{{- if or (regexFind "PLAIN" (upper .Values.sasl.enabledMechanisms)) (regexFind "SCRAM" (upper .Values.sasl.enabledMechanisms)) -}} +true +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if a SASL mechanism that uses client IDs and client secrets is in use +*/}} +{{- define "kafka.saslClientSecretsEnabled" -}} +{{- if (include "kafka.saslEnabled" .) 
-}}
+{{- if (regexFind "OAUTHBEARER" (upper .Values.sasl.enabledMechanisms)) -}}
+true
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the security module based on the provided sasl mechanism
+*/}}
+{{- define "kafka.saslSecurityModule" -}}
+{{- if eq "PLAIN" .mechanism -}}
+org.apache.kafka.common.security.plain.PlainLoginModule required
+{{- else if regexFind "SCRAM" .mechanism -}}
+org.apache.kafka.common.security.scram.ScramLoginModule required
+{{- else if eq "OAUTHBEARER" .mechanism -}}
+org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka Kraft secret
+*/}}
+{{- define "kafka.kraftSecretName" -}}
+{{- if .Values.existingKraftSecret -}}
+    {{- print (tpl .Values.existingKraftSecret .) -}}
+{{- else -}}
+    {{- printf "%s-kraft" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka SASL credentials secret
+*/}}
+{{- define "kafka.saslSecretName" -}}
+{{- if .Values.sasl.existingSecret -}}
+    {{- print (tpl .Values.sasl.existingSecret .) -}}
+{{- else -}}
+    {{- printf "%s-user-passwords" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a SASL credentials secret object should be created
+*/}}
+{{- define "kafka.createSaslSecret" -}}
+{{- if and (include "kafka.saslEnabled" .) (empty .Values.sasl.existingSecret) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka TLS credentials secret name
+*/}}
+{{- define "kafka.tlsSecretName" -}}
+{{- if .Values.tls.existingSecret -}}
+    {{- print (tpl .Values.tls.existingSecret .) -}}
+{{- else -}}
+    {{- printf "%s-tls" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "kafka.createTlsSecret" -}}
+{{- if and (include "kafka.sslEnabled" .)
(empty .Values.tls.existingSecret) .Values.tls.autoGenerated.enabled (eq .Values.tls.autoGenerated.engine "helm") -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a Certificate object should be created +*/}} +{{- define "kafka.createCertificate" -}} +{{- if and (include "kafka.sslEnabled" .) (empty .Values.tls.existingSecret) .Values.tls.autoGenerated.enabled (eq .Values.tls.autoGenerated.engine "cert-manager") -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka TLS credentials secret +*/}} +{{- define "kafka.tlsPasswordsSecretName" -}} +{{- if .Values.tls.passwordsSecret -}} + {{- print (tpl .Values.tls.passwordsSecret .) -}} +{{- else -}} + {{- printf "%s-tls-passwords" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "kafka.createTlsPasswordsSecret" -}} +{{- if and (include "kafka.sslEnabled" .) (or (empty .Values.tls.passwordsSecret) .Values.tls.autoGenerated.enabled) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the secret name for the Kafka Provisioning client +*/}} +{{- define "kafka.client.passwordsSecretName" -}} +{{- if .Values.provisioning.auth.tls.passwordsSecret -}} + {{- print (tpl .Values.provisioning.auth.tls.passwordsSecret .) -}} +{{- else -}} + {{- printf "%s-client-secret" (include "common.names.fullname" .) 
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use for the Kafka Provisioning client
+*/}}
+{{- define "kafka.provisioning.serviceAccountName" -}}
+{{- if .Values.provisioning.serviceAccount.create -}}
+    {{ default (printf "%s-provisioning" (include "common.names.fullname" .)) .Values.provisioning.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.provisioning.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka controller-eligible configuration configmap
+*/}}
+{{- define "kafka.controller.configmapName" -}}
+{{- if .Values.controller.existingConfigmap -}}
+    {{- print (tpl .Values.controller.existingConfigmap .) -}}
+{{- else if .Values.existingConfigmap -}}
+    {{- print (tpl .Values.existingConfigmap .) -}}
+{{- else -}}
+    {{- printf "%s-configuration" (include "kafka.controller.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka controller-eligible secret configuration
+*/}}
+{{- define "kafka.controller.secretConfigName" -}}
+{{- if .Values.controller.existingSecretConfig -}}
+    {{- print (tpl .Values.controller.existingSecretConfig .) -}}
+{{- else if .Values.existingSecretConfig -}}
+    {{- print (tpl .Values.existingSecretConfig .) -}}
+{{- else -}}
+    {{- printf "%s-secret-configuration" (include "kafka.controller.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka controller-eligible secret configuration values
+*/}}
+{{- define "kafka.controller.secretConfig" -}}
+{{- if .Values.secretConfig }}
+    {{- print (tpl .Values.secretConfig .) -}}
+{{- end }}
+{{- if .Values.controller.secretConfig }}
+    {{- print (tpl .Values.controller.secretConfig .)
-}} +{{- end }} +{{- end -}} + +{{/* +Return true if a configmap object should be created for controller-eligible pods +*/}} +{{- define "kafka.controller.createConfigmap" -}} +{{- if and (not .Values.controller.existingConfigmap) (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object with config should be created for controller-eligible pods +*/}} +{{- define "kafka.controller.createSecretConfig" -}} +{{- if and (or .Values.controller.secretConfig .Values.secretConfig) (and (not .Values.controller.existingSecretConfig) (not .Values.existingSecretConfig)) }} + {{- true -}} +{{- end -}} +{{- end -}} +{{/* +Return true if a secret object with config exists for controller-eligible pods +*/}} +{{- define "kafka.controller.secretConfigExists" -}} +{{- if or .Values.controller.secretConfig .Values.secretConfig .Values.controller.existingSecretConfig .Values.existingSecretConfig }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka broker configuration configmap +*/}} +{{- define "kafka.broker.configmapName" -}} +{{- if .Values.broker.existingConfigmap -}} + {{- print (tpl .Values.broker.existingConfigmap .) -}} +{{- else if .Values.existingConfigmap -}} + {{- print (tpl .Values.existingConfigmap .) -}} +{{- else -}} + {{- printf "%s-configuration" (include "kafka.broker.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka broker secret configuration +*/}} +{{- define "kafka.broker.secretConfigName" -}} +{{- if .Values.broker.existingSecretConfig -}} + {{- print (tpl .Values.broker.existingSecretConfig .) -}} +{{- else if .Values.existingSecretConfig -}} + {{- print (tpl .Values.existingSecretConfig .) -}} +{{- else -}} + {{- printf "%s-secret-configuration" (include "kafka.broker.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka broker secret configuration values +*/}} +{{- define "kafka.broker.secretConfig" -}} +{{- if .Values.secretConfig }} + {{- print (tpl .Values.secretConfig .) -}} +{{- end }} +{{- if .Values.broker.secretConfig }} + {{- print (tpl .Values.broker.secretConfig .) -}} +{{- end }} +{{- end -}} + +{{/* +Return true if a configmap object should be created for broker pods +*/}} +{{- define "kafka.broker.createConfigmap" -}} +{{- if and (not .Values.broker.existingConfigmap) (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object with config should be created for broker pods +*/}} +{{- define "kafka.broker.createSecretConfig" -}} +{{- if and (or .Values.broker.secretConfig .Values.secretConfig) (and (not .Values.broker.existingSecretConfig) (not .Values.existingSecretConfig)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object with config exists for broker pods +*/}} +{{- define "kafka.broker.secretConfigExists" -}} +{{- if or .Values.broker.secretConfig .Values.secretConfig .Values.broker.existingSecretConfig .Values.existingSecretConfig }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka log4j2 ConfigMap name. +*/}} +{{- define "kafka.log4j2.configMapName" -}} +{{- if .Values.existingLog4j2ConfigMap -}} + {{- print (tpl .Values.existingLog4j2ConfigMap .) -}} +{{- else -}} + {{- printf "%s-log4j2-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.metrics.jmx.configmapName" -}} +{{- if .Values.metrics.jmx.existingConfigmap -}} + {{- print (tpl .Values.metrics.jmx.existingConfigmap .) -}} +{{- else -}} + {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kafka.metrics.jmx.createConfigmap" -}} +{{- if and .Values.metrics.jmx.enabled .Values.metrics.jmx.config (not .Values.metrics.jmx.existingConfigmap) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the Kafka listeners settings based on the listeners.* object +*/}} +{{- define "kafka.listeners" -}} +{{- if .context.Values.listeners.overrideListeners -}} + {{- print .context.Values.listeners.overrideListeners -}} +{{- else -}} + {{- $listeners := list .context.Values.listeners.client .context.Values.listeners.interbroker -}} + {{- if .context.Values.externalAccess.enabled -}} + {{- $listeners = append $listeners .context.Values.listeners.external -}} + {{- end -}} + {{- if .isController -}} + {{- if .context.Values.controller.controllerOnly -}} + {{- $listeners = list .context.Values.listeners.controller -}} + {{- else -}} + {{- $listeners = append $listeners .context.Values.listeners.controller -}} + {{- range $i := .context.Values.listeners.extraListeners -}} + {{- $listeners = append $listeners $i -}} + {{- end -}} + {{- end -}} + {{- else -}} + {{- range $i := .context.Values.listeners.extraListeners -}} + {{- $listeners = append $listeners $i -}} + {{- end -}} + {{- end -}} + {{- $res := list -}} + {{- range $listener := $listeners -}} + {{- $res = append $res (printf "%s://:%d" (upper $listener.name) (int $listener.containerPort)) -}} + {{- end -}} + {{- join "," $res -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the list of advertised listeners, although the advertised address will be replaced during each node init time +*/}} +{{- define "kafka.advertisedListeners" -}} +{{- if .Values.listeners.advertisedListeners -}} + {{- print .Values.listeners.advertisedListeners -}} +{{- else -}} + {{- $listeners := list .Values.listeners.client .Values.listeners.interbroker -}} + {{- range $i := .Values.listeners.extraListeners -}} + {{- $listeners 
= append $listeners $i -}} + {{- end -}} + {{- $res := list -}} + {{- range $listener := $listeners -}} + {{- $res = append $res (printf "%s://advertised-address-placeholder:%d" (upper $listener.name) (int $listener.containerPort)) -}} + {{- end -}} + {{- join "," $res -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the value listener.security.protocol.map based on the values of 'listeners.*.protocol' +*/}} +{{- define "kafka.securityProtocolMap" -}} +{{- if .context.Values.listeners.securityProtocolMap -}} + {{- print .context.Values.listeners.securityProtocolMap -}} +{{- else -}} + {{- $listeners := list .context.Values.listeners.controller .context.Values.listeners.client .context.Values.listeners.interbroker -}} + {{- if and .isController .context.Values.controller.controllerOnly -}} + {{- $listeners = list .context.Values.listeners.controller -}} + {{- else -}} + {{- range $i := .context.Values.listeners.extraListeners -}} + {{- $listeners = append $listeners $i -}} + {{- end -}} + {{- end -}} + {{- if .context.Values.externalAccess.enabled -}} + {{- $listeners = append $listeners .context.Values.listeners.external -}} + {{- end -}} + {{- $res := list -}} + {{- range $listener := $listeners -}} + {{- $res = append $res (printf "%s:%s" (upper $listener.name) (upper $listener.protocol)) -}} + {{- end -}} + {{ join "," $res }} +{{- end -}} +{{- end -}} + +{{/* +Returns the containerPorts for listeners.extraListeners +*/}} +{{- define "kafka.extraListeners.containerPorts" -}} +{{- range $listener := .Values.listeners.extraListeners -}} +- name: {{ lower $listener.name}} + containerPort: {{ $listener.containerPort }} +{{ end }} +{{- end -}} + +{{/* +Returns the controller quorum bootstrap servers based on the number of controller-eligible nodes +*/}} +{{- define "kafka.controller.quorumBootstrapServers" -}} +{{- if .Values.controller.quorumBootstrapServers -}} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.quorumBootstrapServers "context" 
$) -}} +{{- else -}} + {{- $fullname := include "kafka.controller.fullname" . }} + {{- $serviceName := printf "%s-headless" (include "kafka.controller.fullname" .) | trunc 63 | trimSuffix "-" }} + {{- $releaseNamespace := include "common.names.namespace" . -}} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $port := int .Values.listeners.controller.containerPort }} + {{- $bootstrapServers := list -}} + {{- if and (.Values.controller.autoscaling) (.Values.controller.autoscaling.hpa) (.Values.controller.autoscaling.hpa.enabled) -}} + {{- range $i := until (int .Values.controller.autoscaling.hpa.maxReplicas) -}} + {{- $nodeAddress := printf "%s-%d.%s.%s.svc.%s:%d" $fullname (int $i) $serviceName $releaseNamespace $clusterDomain $port -}} + {{- $bootstrapServers = append $bootstrapServers $nodeAddress -}} + {{- end -}} + {{- else -}} + {{- range $i := until (int .Values.controller.replicaCount) -}} + {{- $nodeAddress := printf "%s-%d.%s.%s.svc.%s:%d" $fullname (int $i) $serviceName $releaseNamespace $clusterDomain $port -}} + {{- if eq (int $.Values.kraftVersion) 0 }} + {{- $nodeId := add (int $i) (int $.Values.controller.minId) -}} + {{- $bootstrapServers = append $bootstrapServers (printf "%d@%s" $nodeId $nodeAddress ) -}} + {{- else }} + {{- $bootstrapServers = append $bootstrapServers $nodeAddress -}} + {{- end }} + {{- end -}} + {{- end -}} + {{- join "," $bootstrapServers -}} +{{- end -}} +{{- end -}} + +{{/* +Section of the server.properties shared by both controller-eligible and broker nodes +*/}} +{{- define "kafka.commonConfig" -}} +controller.listener.names: {{ .Values.listeners.controller.name }} +{{- if eq (int .Values.kraftVersion) 0 }} +controller.quorum.voters: {{ include "kafka.controller.quorumBootstrapServers" . }} +{{- else }} +controller.quorum.bootstrap.servers: {{ include "kafka.controller.quorumBootstrapServers" . }} +{{- end }} +{{- if include "kafka.sslEnabled" . 
}} +# TLS configuration +ssl.keystore.type: JKS +ssl.truststore.type: JKS +ssl.keystore.location: /opt/drycc/kafka/config/certs/kafka.keystore.jks +ssl.truststore.location: /opt/drycc/kafka/config/certs/kafka.truststore.jks +ssl.client.auth: {{ .Values.tls.sslClientAuth }} +ssl.endpoint.identification.algorithm: {{ .Values.tls.endpointIdentificationAlgorithm }} +{{- end }} +{{- if (include "kafka.saslEnabled" .) }} +# Listeners SASL JAAS configuration +sasl.enabled.mechanisms: {{ upper .Values.sasl.enabledMechanisms }} +{{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} +sasl.mechanism.inter.broker.protocol: {{ upper .Values.sasl.interBrokerMechanism }} +{{- end }} +{{- if regexFind "SASL" (upper .Values.listeners.controller.protocol) }} +sasl.mechanism.controller.protocol: {{ upper .Values.sasl.controllerMechanism }} +{{- end }} +{{- $listeners := list .Values.listeners.client .Values.listeners.interbroker .Values.listeners.controller }} +{{- range $i := .Values.listeners.extraListeners }} +{{- $listeners = append $listeners $i }} +{{- end }} +{{- if .Values.externalAccess.enabled }} +{{- $listeners = append $listeners .Values.listeners.external }} +{{- end }} +{{- range $listener := $listeners }} + {{- if and $listener.sslClientAuth (regexFind "SSL" (upper $listener.protocol)) }} +listener.name.{{lower $listener.name}}.ssl.client.auth: {{ $listener.sslClientAuth }} + {{- end }} + {{- if regexFind "SASL" (upper $listener.protocol) }} + {{- range $mechanism := splitList "," $.Values.sasl.enabledMechanisms }} + {{- $securityModule := include "kafka.saslSecurityModule" (dict "mechanism" (upper $mechanism)) }} + {{- if and (eq (upper $mechanism) "OAUTHBEARER") (or (eq $listener.name $.Values.listeners.interbroker.name) (eq $listener.name $.Values.listeners.controller.name)) }} +listener.name.{{lower $listener.name}}.oauthbearer.sasl.login.callback.handler.class: org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler 
+ {{- end }} + {{- $saslJaasConfig := list $securityModule }} + {{- if eq $listener.name $.Values.listeners.interbroker.name }} + {{- if (eq (upper $mechanism) "OAUTHBEARER") }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "clientId=\"%s\"" $.Values.sasl.interbroker.clientId) }} + {{- $saslJaasConfig = append $saslJaasConfig (print "clientSecret=\"interbroker-client-secret-placeholder\"") }} + {{- else }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "username=\"%s\"" $.Values.sasl.interbroker.user) }} + {{- $saslJaasConfig = append $saslJaasConfig (print "password=\"interbroker-password-placeholder\"") }} + {{- end }} + {{- else if eq $listener.name $.Values.listeners.controller.name }} + {{- if (eq (upper $mechanism) "OAUTHBEARER") }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "clientId=\"%s\"" $.Values.sasl.controller.clientId) }} + {{- $saslJaasConfig = append $saslJaasConfig (print "clientSecret=\"controller-client-secret-placeholder\"") }} + {{- else }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "username=\"%s\"" $.Values.sasl.controller.user) }} + {{- $saslJaasConfig = append $saslJaasConfig (print "password=\"controller-password-placeholder\"") }} + {{- end }} + {{- end }} + {{- if eq (upper $mechanism) "PLAIN" }} + {{- if eq $listener.name $.Values.listeners.interbroker.name }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"interbroker-password-placeholder\"" $.Values.sasl.interbroker.user) }} + {{- else if eq $listener.name $.Values.listeners.controller.name }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"controller-password-placeholder\"" $.Values.sasl.controller.user) }} + {{- end }} + {{- range $i, $user := $.Values.sasl.client.users }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"password-placeholder-%d\"" $user (int $i)) }} + {{- end }} + {{- end }} +listener.name.{{lower $listener.name}}.{{lower $mechanism}}.sasl.jaas.config: {{ printf "%s;" (join 
" " $saslJaasConfig) }} + {{- if eq (upper $mechanism) "OAUTHBEARER" }} +listener.name.{{lower $listener.name}}.oauthbearer.sasl.server.callback.handler.class: org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerValidatorCallbackHandler + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- if regexFind "OAUTHBEARER" .Values.sasl.enabledMechanisms }} +sasl.oauthbearer.token.endpoint.url: {{ .Values.sasl.oauthbearer.tokenEndpointUrl }} +sasl.oauthbearer.jwks.endpoint.url: {{ .Values.sasl.oauthbearer.jwksEndpointUrl }} +sasl.oauthbearer.expected.audience: {{ .Values.sasl.oauthbearer.expectedAudience }} +sasl.oauthbearer.sub.claim.name: {{ .Values.sasl.oauthbearer.subClaimName }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Environment variables required to configure SASL +*/}} +{{- define "kafka.saslEnv" -}} +{{- if and (include "kafka.client.saslEnabled" . ) (include "kafka.saslUserPasswordsEnabled" .) .Values.sasl.client.users }} +- name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} +{{- if .Values.usePasswordFiles }} +- name: KAFKA_CLIENT_PASSWORDS_FILE + value: /opt/drycc/kafka/config/secrets/client-passwords +{{- else }} +- name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: client-passwords +{{- end }} +{{- end }} +{{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} +{{- if include "kafka.saslUserPasswordsEnabled" . }} +- name: KAFKA_INTER_BROKER_USER + value: {{ .Values.sasl.interbroker.user | quote }} +{{- if .Values.usePasswordFiles }} +- name: KAFKA_INTER_BROKER_PASSWORD_FILE + value: /opt/drycc/kafka/config/secrets/inter-broker-password +{{- else }} +- name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: inter-broker-password +{{- end }} +{{- end }} +{{- if include "kafka.saslClientSecretsEnabled" . 
}} +- name: KAFKA_INTER_BROKER_CLIENT_ID + value: {{ .Values.sasl.interbroker.clientId | quote }} +{{- if .Values.usePasswordFiles }} +- name: KAFKA_INTER_BROKER_CLIENT_SECRET_FILE + value: /opt/drycc/kafka/config/secrets/inter-broker-client-secret +{{- else }} +- name: KAFKA_INTER_BROKER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: inter-broker-client-secret +{{- end }} +{{- end }} +{{- end }} +{{- if regexFind "SASL" (upper .Values.listeners.controller.protocol) }} +{{- if include "kafka.saslUserPasswordsEnabled" . }} +- name: KAFKA_CONTROLLER_USER + value: {{ .Values.sasl.controller.user | quote }} +{{- if .Values.usePasswordFiles }} +- name: KAFKA_CONTROLLER_PASSWORD_FILE + value: /opt/drycc/kafka/config/secrets/controller-password +{{- else }} +- name: KAFKA_CONTROLLER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: controller-password +{{- end }} +{{- end }} +{{- if include "kafka.saslClientSecretsEnabled" . }} +- name: KAFKA_CONTROLLER_CLIENT_ID + value: {{ .Values.sasl.controller.clientId | quote }} +{{- if .Values.usePasswordFiles }} +- name: KAFKA_CONTROLLER_CLIENT_SECRET_FILE + value: /opt/drycc/kafka/config/secrets/controller-client-secret +{{- else }} +- name: KAFKA_CONTROLLER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: controller-client-secret +{{- end }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Environment variables shared by both controller-eligible and broker nodes +*/}} +{{- define "kafka.commonEnv" -}} +- name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} +- name: KAFKA_KRAFT_CLUSTER_ID + valueFrom: + secretKeyRef: + name: {{ template "kafka.kraftSecretName" . }} + key: cluster-id +{{- if and (include "kafka.saslEnabled" .) 
(or (regexFind "SCRAM" (upper .Values.sasl.enabledMechanisms)) (regexFind "SCRAM" (upper .Values.sasl.controllerMechanism)) (regexFind "SCRAM" (upper .Values.sasl.interBrokerMechanism))) }} +- name: KAFKA_KRAFT_BOOTSTRAP_SCRAM_USERS + value: "true" +{{ include "kafka.saslEnv" . }} +{{- end }} +{{- if .Values.provisioning.enabled }} +{{- end }} +{{- if .Values.metrics.jmx.enabled }} +- name: JMX_PORT + value: {{ .Values.metrics.jmx.kafkaJmxPort | quote }} +{{- end }} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "kafka.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.defaultInitContainers.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.defaultInitContainers.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "kafka.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "kafka.validateValues.listener.protocols" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.controller.nodePortListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.broker.nodePortListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.controller.externalIPListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.broker.externalIPListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.brokerRackAwareness" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.domainSpecified" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryRBAC" .) 
-}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerIPs" "context" .)) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerNames" "context" .)) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerAnnotations" "context" . )) -}}
+{{- $messages := append $messages (include "kafka.validateValues.saslMechanisms" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.tlsSecret" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.provisioning.tlsPasswords" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.missingController" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - Authentication protocols for Kafka */}}
+{{- define "kafka.validateValues.listener.protocols" -}}
+{{- $authProtocols := list "PLAINTEXT" "SASL_PLAINTEXT" "SASL_SSL" "SSL" -}}
+{{- if not .Values.listeners.securityProtocolMap -}}
+{{- $listeners := list .Values.listeners.client .Values.listeners.interbroker .Values.listeners.controller -}}
+{{- if and .Values.externalAccess.enabled -}}
+{{- $listeners = append $listeners .Values.listeners.external -}}
+{{- end -}}
+{{- $error := false -}}
+{{- range $listener := $listeners -}}
+{{- if not (has (upper $listener.protocol) $authProtocols) -}}
+{{- $error = true -}}{{- /* must be "=" (assign): ":=" would declare a new $error scoped to the range body and the outer flag would never flip */ -}}
+{{- end -}}
+{{- end -}}
+{{- if $error -}}
+kafka: listeners.*.protocol
+  Available authentication protocols are "PLAINTEXT" "SASL_PLAINTEXT" "SSL" "SASL_SSL"
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values 
of Kafka - number of controller-eligible replicas must be the same as NodePort list in controller-eligible external service */}} +{{- define "kafka.validateValues.controller.nodePortListLength" -}} +{{- $replicaCount := int .Values.controller.replicaCount -}} +{{- $nodePortListLength := len .Values.externalAccess.controller.service.nodePorts -}} +{{- $nodePortListIsEmpty := empty .Values.externalAccess.controller.service.nodePorts -}} +{{- $nodePortListLengthEqualsReplicaCount := eq $nodePortListLength $replicaCount -}} +{{- $externalIPListIsEmpty := empty .Values.externalAccess.controller.service.externalIPs -}} +{{- if and .Values.externalAccess.enabled (not .Values.defaultInitContainers.autoDiscovery.enabled) (eq .Values.externalAccess.controller.service.type "NodePort") (or (and (not $nodePortListIsEmpty) (not $nodePortListLengthEqualsReplicaCount)) (and $nodePortListIsEmpty $externalIPListIsEmpty)) -}} +kafka: .Values.externalAccess.controller.service.nodePorts + Number of controller-eligible replicas and externalAccess.controller.service.nodePorts array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and length nodePorts = {{ $nodePortListLength }} - {{ $externalIPListIsEmpty }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of broker replicas must be the same as NodePort list in broker external service */}} +{{- define "kafka.validateValues.broker.nodePortListLength" -}} +{{- $replicaCount := int .Values.broker.replicaCount -}} +{{- $nodePortListLength := len .Values.externalAccess.broker.service.nodePorts -}} +{{- $nodePortListIsEmpty := empty .Values.externalAccess.broker.service.nodePorts -}} +{{- $nodePortListLengthEqualsReplicaCount := eq $nodePortListLength $replicaCount -}} +{{- $externalIPListIsEmpty := empty .Values.externalAccess.broker.service.externalIPs -}} +{{- if and .Values.externalAccess.enabled (not .Values.defaultInitContainers.autoDiscovery.enabled) (eq .Values.externalAccess.broker.service.type "NodePort") (or (and (not $nodePortListIsEmpty) (not $nodePortListLengthEqualsReplicaCount)) (and $nodePortListIsEmpty $externalIPListIsEmpty)) -}} +kafka: .Values.externalAccess.broker.service.nodePorts + Number of broker replicas and externalAccess.broker.service.nodePorts array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and length nodePorts = {{ $nodePortListLength }} - {{ $externalIPListIsEmpty }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of replicas must be the same as externalIPs list */}} +{{- define "kafka.validateValues.controller.externalIPListLength" -}} +{{- $replicaCount := int .Values.controller.replicaCount -}} +{{- $externalIPListLength := len .Values.externalAccess.controller.service.externalIPs -}} +{{- $externalIPListIsEmpty := empty .Values.externalAccess.controller.service.externalIPs -}} +{{- $externalIPListEqualsReplicaCount := eq $externalIPListLength $replicaCount -}} +{{- $nodePortListIsEmpty := empty .Values.externalAccess.controller.service.nodePorts -}} +{{- if and .Values.externalAccess.enabled (or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly)) (not .Values.defaultInitContainers.autoDiscovery.enabled) (eq .Values.externalAccess.controller.service.type "NodePort") (or (and (not $externalIPListIsEmpty) (not $externalIPListEqualsReplicaCount)) (and $externalIPListIsEmpty $nodePortListIsEmpty)) -}} +kafka: .Values.externalAccess.controller.service.externalIPs + Number of controller-eligible replicas and externalAccess.controller.service.externalIPs array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and length externalIPs = {{ $externalIPListLength }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of replicas must be the same as externalIPs list */}} +{{- define "kafka.validateValues.broker.externalIPListLength" -}} +{{- $replicaCount := int .Values.broker.replicaCount -}} +{{- $externalIPListLength := len .Values.externalAccess.broker.service.externalIPs -}} +{{- $externalIPListIsEmpty := empty .Values.externalAccess.broker.service.externalIPs -}} +{{- $externalIPListEqualsReplicaCount := eq $externalIPListLength $replicaCount -}} +{{- $nodePortListIsEmpty := empty .Values.externalAccess.broker.service.nodePorts -}} +{{- if and .Values.externalAccess.enabled (not .Values.defaultInitContainers.autoDiscovery.enabled) (eq .Values.externalAccess.broker.service.type "NodePort") (or (and (not $externalIPListIsEmpty) (not $externalIPListEqualsReplicaCount)) (and $externalIPListIsEmpty $nodePortListIsEmpty)) -}} +kafka: .Values.externalAccess.broker.service.externalIPs + Number of broker replicas and externalAccess.broker.service.externalIPs array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and length externalIPs = {{ $externalIPListLength }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - broker rack assignment allowed values */}} +{{- define "kafka.validateValues.brokerRackAwareness" -}} +{{- if and .Values.brokerRackAwareness.enabled (ne .Values.brokerRackAwareness.cloudProvider "aws-az") (ne .Values.brokerRackAwareness.cloudProvider "azure") -}} +kafka: .Values.brokerRackAwareness.cloudProvider + Available values for the cloud provider to use for broker rack awareness are "aws-az" or "azure" +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - domain must be defined if external service type ClusterIP */}} +{{- define "kafka.validateValues.domainSpecified" -}} +{{- if and (eq .Values.externalAccess.controller.service.type "ClusterIP") (empty .Values.externalAccess.controller.service.domain) -}} +kafka: .Values.externalAccess.controller.service.domain + Domain must be specified if service type ClusterIP is set for external service +{{- end -}} +{{- if and (eq .Values.externalAccess.broker.service.type "ClusterIP") (empty .Values.externalAccess.broker.service.domain) -}} +kafka: .Values.externalAccess.broker.service.domain + Domain must be specified if service type ClusterIP is set for external service +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - service type for external access */}} +{{- define "kafka.validateValues.externalAccessServiceType" -}} +{{- if and (not (eq .Values.externalAccess.controller.service.type "NodePort")) (not (eq .Values.externalAccess.controller.service.type "LoadBalancer")) (not (eq .Values.externalAccess.controller.service.type "ClusterIP")) -}} +kafka: externalAccess.controller.service.type + Available service type for external access are NodePort, LoadBalancer or ClusterIP. 
+{{- end -}} +{{- if and (not (eq .Values.externalAccess.broker.service.type "NodePort")) (not (eq .Values.externalAccess.broker.service.type "LoadBalancer")) (not (eq .Values.externalAccess.broker.service.type "ClusterIP")) -}} +kafka: externalAccess.broker.service.type + Available service type for external access are NodePort, LoadBalancer or ClusterIP. +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - RBAC should be enabled when autoDiscovery is enabled */}} +{{- define "kafka.validateValues.externalAccessAutoDiscoveryRBAC" -}} +{{- if and .Values.externalAccess.enabled .Values.defaultInitContainers.autoDiscovery.enabled (not .Values.rbac.create ) }} +kafka: rbac.create + By specifying "externalAccess.enabled=true" and "defaultInitContainers.autoDiscovery.enabled=true" + an initContainer will be used to auto-detect the external IPs/ports by querying the + K8s API. Please note this initContainer requires specific RBAC resources. You can create them + by specifying "--set rbac.create=true". +{{- end -}} +{{- if and .Values.externalAccess.enabled .Values.defaultInitContainers.autoDiscovery.enabled (gt (int .Values.controller.replicaCount) 0) (not .Values.controller.automountServiceAccountToken) }} +kafka: controller-automountServiceAccountToken + By specifying "externalAccess.enabled=true" and "defaultInitContainers.autoDiscovery.enabled=true" + an initContainer will be used to auto-detect the external IPs/ports by querying the + K8s API. Please note this initContainer requires the service account token. Please set controller.automountServiceAccountToken=true + and broker.automountServiceAccountToken=true. 
+{{- end -}} +{{- if and .Values.externalAccess.enabled .Values.defaultInitContainers.autoDiscovery.enabled (gt (int .Values.broker.replicaCount) 0) (not .Values.broker.automountServiceAccountToken) }} +kafka: broker-automountServiceAccountToken + By specifying "externalAccess.enabled=true" and "defaultInitContainers.autoDiscovery.enabled=true" + an initContainer will be used to auto-detect the external IPs/ports by querying the + K8s API. Please note this initContainer requires the service account token. Please set controller.automountServiceAccountToken=true + and broker.automountServiceAccountToken=true. +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - LoadBalancerIPs or LoadBalancerNames should be set when autoDiscovery is disabled */}} +{{- define "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" -}} +{{- $loadBalancerNameListLength := len .Values.externalAccess.controller.service.loadBalancerNames -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.controller.service.loadBalancerIPs -}} +{{- if and .Values.externalAccess.enabled (gt (int .Values.controller.replicaCount) 0) (or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly)) (eq .Values.externalAccess.controller.service.type "LoadBalancer") (not .Values.defaultInitContainers.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }} +kafka: externalAccess.controller.service.loadBalancerNames or externalAccess.controller.service.loadBalancerIPs + By specifying "externalAccess.enabled=true", "defaultInitContainers.autoDiscovery.enabled=false" and + "externalAccess.controller.service.type=LoadBalancer" at least one of externalAccess.controller.service.loadBalancerNames + or externalAccess.controller.service.loadBalancerIPs must be set and the length of those arrays must be equal + to the number of replicas. 
+{{- end -}}
+{{- $loadBalancerNameListLength := len .Values.externalAccess.broker.service.loadBalancerNames -}}
+{{- $loadBalancerIPListLength := len .Values.externalAccess.broker.service.loadBalancerIPs -}}
+{{- $replicaCount := int .Values.broker.replicaCount }}{{- /* gt is (a, b) => a > b; the previous (gt 0 $replicaCount) was never true, so this broker check was dead */ -}}
+{{- if and .Values.externalAccess.enabled (gt $replicaCount 0) (eq .Values.externalAccess.broker.service.type "LoadBalancer") (not .Values.defaultInitContainers.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }}
+kafka: externalAccess.broker.service.loadBalancerNames or externalAccess.broker.service.loadBalancerIPs
+  By specifying "externalAccess.enabled=true", "defaultInitContainers.autoDiscovery.enabled=false" and
+  "externalAccess.broker.service.type=LoadBalancer" at least one of externalAccess.broker.service.loadBalancerNames
+  or externalAccess.broker.service.loadBalancerIPs must be set and the length of those arrays must be equal
+  to the number of replicas.
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of replicas must be the same as loadBalancerIPs list */}}
+{{- define "kafka.validateValues.externalAccessServiceList" -}}
+{{- $replicaCount := int .context.Values.controller.replicaCount }}
+{{- $listLength := len (get .context.Values.externalAccess.controller.service .element) -}}
+{{- if and .context.Values.externalAccess.enabled (or .context.Values.externalAccess.controller.forceExpose (not .context.Values.controller.controllerOnly)) (not .context.Values.defaultInitContainers.autoDiscovery.enabled) (eq .context.Values.externalAccess.controller.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }}
+kafka: externalAccess.service.{{ .element }}
+  Number of replicas and {{ .element }} array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }}
+{{- end -}}
+{{- $replicaCount := int .context.Values.broker.replicaCount }}{{- /* gt is (a, b) => a > b; the previous (gt 0 $replicaCount) was never true, so the broker list-length check was dead */ -}}
+{{- $listLength := len (get .context.Values.externalAccess.broker.service .element) -}}
+{{- if and .context.Values.externalAccess.enabled (gt $replicaCount 0) (not .context.Values.defaultInitContainers.autoDiscovery.enabled) (eq .context.Values.externalAccess.broker.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }}
+kafka: externalAccess.service.{{ .element }}
+  Number of replicas and {{ .element }} array length must be the same. Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - SASL mechanisms must be provided when using SASL */}}
+{{- define "kafka.validateValues.saslMechanisms" -}}
+{{- if and (include "kafka.saslEnabled" .) (not .Values.sasl.enabledMechanisms) }}
+kafka: sasl.enabledMechanisms
+  The SASL mechanisms are required when listeners use SASL security protocol.
+{{- end }}
+{{- if not (contains .Values.sasl.interBrokerMechanism .Values.sasl.enabledMechanisms) }}
+kafka: sasl.enabledMechanisms
+  sasl.interBrokerMechanism must be provided and it should be one of the specified mechanisms at sasl.enabledMechanisms
+{{- end -}}
+{{- if not (contains .Values.sasl.controllerMechanism .Values.sasl.enabledMechanisms) }}
+kafka: sasl.enabledMechanisms
+  sasl.controllerMechanism must be provided and it should be one of the specified mechanisms at sasl.enabledMechanisms
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - Secrets containing TLS certs must be provided when TLS authentication is enabled */}}
+{{- define "kafka.validateValues.tlsSecret" -}}
+{{- if and (include "kafka.sslEnabled" .) 
(eq (upper .Values.tls.type) "JKS") (empty .Values.tls.existingSecret) (not .Values.tls.autoGenerated.enabled) }}
+kafka: tls.existingSecret
+  A secret containing the Kafka JKS keystores and truststore is required
+  when TLS encryption is enabled and TLS format is "JKS"
+{{- else if and (include "kafka.sslEnabled" .) (eq (upper .Values.tls.type) "PEM") (empty .Values.tls.existingSecret) (not .Values.tls.autoGenerated.enabled) }}
+kafka: tls.existingSecret
+  A secret containing the Kafka TLS certificates and keys is required
+  when TLS encryption is enabled and TLS format is "PEM"
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka provisioning - keyPasswordSecretKey, keystorePasswordSecretKey or truststorePasswordSecretKey must not be used without passwordsSecret */}}
+{{- define "kafka.validateValues.provisioning.tlsPasswords" -}}
+{{- if and (regexFind "SSL" (upper .Values.listeners.client.protocol)) .Values.provisioning.enabled (not .Values.provisioning.auth.tls.passwordsSecret) }}
+{{- if or .Values.provisioning.auth.tls.keyPasswordSecretKey .Values.provisioning.auth.tls.keystorePasswordSecretKey .Values.provisioning.auth.tls.truststorePasswordSecretKey }}
+kafka: tls.keyPasswordSecretKey,tls.keystorePasswordSecretKey,tls.truststorePasswordSecretKey
+  tls.keyPasswordSecretKey,tls.keystorePasswordSecretKey,tls.truststorePasswordSecretKey
+  must not be used without passwordsSecret set.
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - At least 1 controller is configured or controller.quorum.bootstrap.servers is set */}}
+{{- define "kafka.validateValues.missingController" -}}
+{{- if and (le (int .Values.controller.replicaCount) 0) (not .Values.controller.quorumBootstrapServers) }}
+kafka: Missing controller-eligible nodes
+  No controller-eligible nodes have been configured. 
+{{- end -}} +{{- end -}} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/_init_containers.tpl b/addons/kafka/4.2/chart/kafka-4.2/templates/_init_containers.tpl new file mode 100644 index 00000000..2d742810 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/_init_containers.tpl @@ -0,0 +1,513 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Returns an init-container that changes the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +*/}} +{{- define "kafka.defaultInitContainers.volumePermissions" -}} +{{- $roleValues := index .context.Values .role -}} +- name: volume-permissions + image: {{ include "kafka.volumePermissions.image" .context }} + imagePullPolicy: {{ .context.Values.defaultInitContainers.volumePermissions.image.pullPolicy | quote }} + {{- if .context.Values.defaultInitContainers.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .context.Values.defaultInitContainers.volumePermissions.containerSecurityContext "context" .context) | nindent 4 }} + {{- end }} + {{- if .context.Values.defaultInitContainers.volumePermissions.resources }} + resources: {{- toYaml .context.Values.defaultInitContainers.volumePermissions.resources | nindent 4 }} + {{- else if ne .context.Values.defaultInitContainers.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .context.Values.defaultInitContainers.volumePermissions.resourcesPreset) | nindent 4 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ $roleValues.persistence.mountPath }} {{ $roleValues.logPersistence.mountPath }} + {{- if eq ( toString ( .context.Values.defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + find {{ 
$roleValues.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R $(id -u):$(id -G | cut -d " " -f2) + find {{ $roleValues.logPersistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R $(id -u):$(id -G | cut -d " " -f2) + {{- else }} + find {{ $roleValues.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ $roleValues.containerSecurityContext.runAsUser }}:{{ $roleValues.podSecurityContext.fsGroup }} + find {{ $roleValues.logPersistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ $roleValues.containerSecurityContext.runAsUser }}:{{ $roleValues.podSecurityContext.fsGroup }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ $roleValues.persistence.mountPath }} + - name: logs + mountPath: {{ $roleValues.logPersistence.mountPath }} +{{- end -}} + +{{/* +Returns an init-container that auto-discovers the external access details +*/}} +{{- define "kafka.defaultInitContainers.autoDiscovery" -}} +{{- $externalAccess := index .context.Values.externalAccess .role }} +- name: auto-discovery + image: {{ include "kafka.autoDiscovery.image" .context }} + imagePullPolicy: {{ .context.Values.defaultInitContainers.autoDiscovery.image.pullPolicy | quote }} + {{- if .context.Values.defaultInitContainers.autoDiscovery.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .context.Values.defaultInitContainers.autoDiscovery.containerSecurityContext "context" .context) | nindent 4 }} + {{- end }} + {{- if .context.Values.defaultInitContainers.autoDiscovery.resources }} + resources: {{- toYaml .context.Values.defaultInitContainers.autoDiscovery.resources | nindent 4 }} + {{- else if ne .context.Values.defaultInitContainers.autoDiscovery.resourcesPreset "none" 
}} + resources: {{- include "common.resources.preset" (dict "type" .context.Values.defaultInitContainers.autoDiscovery.resourcesPreset) | nindent 4 }} + {{- end }} + command: + - init-stack + - /bin/bash + args: + - -ec + - | + SVC_NAME="${MY_POD_NAME}-external" + AUTODISCOVERY_SERVICE_TYPE="${AUTODISCOVERY_SERVICE_TYPE:-}" + + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + k8s_svc_node_port() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + + if [[ "$AUTODISCOVERY_SERVICE_TYPE" = "LoadBalancer" ]]; then + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready $MY_POD_NAMESPACE $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "$MY_POD_NAMESPACE" "$SVC_NAME" | tee "/shared/external-host.txt" + elif [[ "$AUTODISCOVERY_SERVICE_TYPE" = "NodePort" ]]; then + k8s_svc_node_port "$MY_POD_NAMESPACE" "$SVC_NAME" | tee 
"/shared/external-port.txt" + else + echo "Unsupported autodiscovery service type: '$AUTODISCOVERY_SERVICE_TYPE'" + exit 1 + fi + + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: AUTODISCOVERY_SERVICE_TYPE + value: {{ $externalAccess.service.type | quote }} + volumeMounts: + - name: init-shared + mountPath: /shared +{{- end -}} + +{{/* +Returns an init-container that prepares the Kafka configuration files for main containers to use them +*/}} +{{- define "kafka.defaultInitContainers.prepareConfig" -}} +{{- $roleValues := index .context.Values .role -}} +{{- $externalAccessEnabled := or (and (eq .role "broker") .context.Values.externalAccess.enabled) (and (eq .role "controller") .context.Values.externalAccess.enabled (or .context.Values.externalAccess.controller.forceExpose (not .context.Values.controller.controllerOnly))) }} +- name: prepare-config + image: {{ include "kafka.image" .context }} + imagePullPolicy: {{ .context.Values.image.pullPolicy }} + {{- if .context.Values.defaultInitContainers.prepareConfig.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .context.Values.defaultInitContainers.prepareConfig.containerSecurityContext "context" .context) | nindent 4 }} + {{- end }} + {{- if .context.Values.defaultInitContainers.prepareConfig.resources }} + resources: {{- toYaml .context.Values.defaultInitContainers.prepareConfig.resources | nindent 4 }} + {{- else if ne .context.Values.defaultInitContainers.prepareConfig.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .context.Values.defaultInitContainers.prepareConfig.resourcesPreset) | nindent 4 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + . 
/opt/drycc/scripts/libkafka.sh + + {{- if $externalAccessEnabled }} + configure_external_access() { + local host port + # Configure external hostname + if [[ -f "/shared/external-host.txt" ]]; then + host=$(cat "/shared/external-host.txt") + elif [[ -n "${EXTERNAL_ACCESS_HOST:-}" ]]; then + host="$EXTERNAL_ACCESS_HOST" + elif [[ -n "${EXTERNAL_ACCESS_HOSTS_LIST:-}" ]]; then + read -r -a hosts <<< "$(tr ',' ' ' <<<"${EXTERNAL_ACCESS_HOSTS_LIST}")" + host="${hosts[$POD_ID]}" + elif is_boolean_yes "$EXTERNAL_ACCESS_HOST_USE_PUBLIC_IP"; then + host=$(curl -s https://ipinfo.io/ip) + else + error "External access hostname not provided" + fi + # Configure external port + if [[ -f "/shared/external-port.txt" ]]; then + port=$(cat "/shared/external-port.txt") + elif [[ -n "${EXTERNAL_ACCESS_PORT:-}" ]]; then + port="$EXTERNAL_ACCESS_PORT" + if is_boolean_yes "${EXTERNAL_ACCESS_PORT_AUTOINCREMENT:-}"; then + port="$((port + POD_ID))" + fi + elif [[ -n "${EXTERNAL_ACCESS_PORTS_LIST:-}" ]]; then + read -r -a ports <<<"$(tr ',' ' ' <<<"${EXTERNAL_ACCESS_PORTS_LIST}")" + port="${ports[$POD_ID]}" + else + error "External access port not provided" + fi + # Configure Kafka advertised listeners + sed -i -E "s|^(advertised\.listeners=\S+)$|\1,${EXTERNAL_ACCESS_LISTENER_NAME}://${host}:${port}|" "$KAFKA_CONF_FILE" + } + {{- end }} + {{- if include "kafka.sslEnabled" .context }} + configure_kafka_tls() { + # Remove previously existing keystores and certificates, if any + rm -f /certs/kafka.keystore.jks /certs/kafka.truststore.jks + rm -f /certs/tls.crt /certs/tls.key /certs/ca.crt + find /certs -name "xx*" -exec rm {} \; + if [[ "${KAFKA_TLS_TYPE}" = "PEM" ]]; then + # Copy PEM certificate and key + if [[ -f "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.crt" && "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.key" ]]; then + cp "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.crt" /certs/tls.crt + # Copy the PEM key ensuring the key used PEM format with PKCS#8 + openssl pkcs8 -topk8 -nocrypt 
-passin pass:"${KAFKA_TLS_PEM_KEY_PASSWORD:-}" -in "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.key" > /certs/tls.key + elif [[ -f /mounted-certs/tls.crt && -f /mounted-certs/tls.key ]]; then + cp "/mounted-certs/tls.crt" /certs/tls.crt + # Copy the PEM key ensuring the key used PEM format with PKCS#8 + openssl pkcs8 -topk8 -passin pass:"${KAFKA_TLS_PEM_KEY_PASSWORD:-}" -nocrypt -in "/mounted-certs/tls.key" > /certs/tls.key + else + error "PEM key and cert files not found" + fi + {{- if not .context.Values.tls.pemChainIncluded }} + # Copy CA certificate + if [[ -f /mounted-certs/ca.crt ]]; then + cp /mounted-certs/ca.crt /certs/ca.crt + else + error "CA certificate file not found" + fi + {{- else }} + # CA certificates are also included in the same certificate + # All public certs will be included in the truststore + cp /certs/tls.crt /certs/ca.crt + {{- end }} + # Create JKS keystore from PEM cert and key + openssl pkcs12 -export -in "/certs/tls.crt" \ + -passout pass:"$KAFKA_TLS_KEYSTORE_PASSWORD" \ + -inkey "/certs/tls.key" \ + -out "/certs/kafka.keystore.p12" + keytool -importkeystore -srckeystore "/certs/kafka.keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "$KAFKA_TLS_KEYSTORE_PASSWORD" \ + -deststorepass "$KAFKA_TLS_KEYSTORE_PASSWORD" \ + -destkeystore "/certs/kafka.keystore.jks" \ + -noprompt + # Create JKS truststore from CA cert + keytool -keystore /certs/kafka.truststore.jks -alias CARoot -import -file /certs/ca.crt -storepass "$KAFKA_TLS_TRUSTSTORE_PASSWORD" -noprompt + # Remove extra files + rm -f "/certs/kafka.keystore.p12" "/certs/tls.crt" "/certs/tls.key" "/certs/ca.crt" + elif [[ "$KAFKA_TLS_TYPE" = "JKS" ]]; then + if [[ -f "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.keystore.jks" ]]; then + cp "/mounted-certs/kafka-${POD_ROLE}-${POD_ID}.keystore.jks" /certs/kafka.keystore.jks + elif [[ -f "$KAFKA_TLS_KEYSTORE_FILE" ]]; then + cp "$KAFKA_TLS_KEYSTORE_FILE" /certs/kafka.keystore.jks + else + error "Keystore file not found" + fi + if [[ -f 
"$KAFKA_TLS_TRUSTSTORE_FILE" ]]; then + cp "$KAFKA_TLS_TRUSTSTORE_FILE" /certs/kafka.truststore.jks + else + error "Truststore file not found" + fi + else + error "Invalid type $KAFKA_TLS_TYPE" + fi + # Configure TLS password settings in Kafka configuration + [[ -n "${KAFKA_TLS_KEYSTORE_PASSWORD:-}" ]] && kafka_server_conf_set "ssl.keystore.password" "$KAFKA_TLS_KEYSTORE_PASSWORD" + [[ -n "${KAFKA_TLS_TRUSTSTORE_PASSWORD:-}" ]] && kafka_server_conf_set "ssl.truststore.password" "$KAFKA_TLS_TRUSTSTORE_PASSWORD" + [[ -n "${KAFKA_TLS_PEM_KEY_PASSWORD:-}" ]] && kafka_server_conf_set "ssl.key.password" "$KAFKA_TLS_PEM_KEY_PASSWORD" + # Avoid errors caused by previous checks + true + } + {{- end }} + {{- if include "kafka.saslEnabled" .context }} + configure_kafka_sasl() { + # Replace placeholders with passwords + {{- if regexFind "SASL" (upper .context.Values.listeners.interbroker.protocol) }} + {{- if include "kafka.saslUserPasswordsEnabled" .context }} + replace_in_file "$KAFKA_CONF_FILE" "interbroker-password-placeholder" "$KAFKA_INTER_BROKER_PASSWORD" + {{- end }} + {{- if include "kafka.saslClientSecretsEnabled" .context }} + replace_in_file "$KAFKA_CONF_FILE" "interbroker-client-secret-placeholder" "$KAFKA_INTER_BROKER_CLIENT_SECRET" + {{- end }} + {{- end }} + {{- if regexFind "SASL" (upper .context.Values.listeners.controller.protocol) }} + {{- if include "kafka.saslUserPasswordsEnabled" .context }} + replace_in_file "$KAFKA_CONF_FILE" "controller-password-placeholder" "$KAFKA_CONTROLLER_PASSWORD" + {{- end }} + {{- if include "kafka.saslClientSecretsEnabled" .context }} + replace_in_file "$KAFKA_CONF_FILE" "controller-client-secret-placeholder" "$KAFKA_CONTROLLER_CLIENT_SECRET" + {{- end }} + {{- end }} + {{- if include "kafka.client.saslEnabled" .context }} + read -r -a passwords <<< "$(tr ',;' ' ' <<<"${KAFKA_CLIENT_PASSWORDS:-}")" + for ((i = 0; i < ${#passwords[@]}; i++)); do + replace_in_file "$KAFKA_CONF_FILE" "password-placeholder-${i}\"" 
"${passwords[i]}\"" + done + {{- end }} + } + {{- end }} + {{- if .context.Values.brokerRackAwareness.enabled }} + configure_kafka_broker_rack() { + local -r metadata_api_ip="169.254.169.254" + local broker_rack="" + {{- if eq .context.Values.brokerRackAwareness.cloudProvider "aws-az" }} + echo "Obtaining broker.rack for aws-az rack assignment" + ec2_metadata_token=$(curl -X PUT "http://${metadata_api_ip}/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 60") + broker_rack=$(curl -H "X-aws-ec2-metadata-token: $ec2_metadata_token" "http://${metadata_api_ip}/latest/meta-data/placement/availability-zone-id") + {{- else if eq .context.Values.brokerRackAwareness.cloudProvider "azure" }} + echo "Obtaining broker.rack for azure rack assignment" + location=$(curl -s -H Metadata:true --noproxy "*" "http://${metadata_api_ip}/metadata/instance/compute/location?api-version={{ .context.Values.brokerRackAwareness.azureApiVersion }}&format=text") + zone=$(curl -s -H Metadata:true --noproxy "*" "http://${metadata_api_ip}/metadata/instance/compute/zone?api-version={{ .context.Values.brokerRackAwareness.azureApiVersion }}&format=text") + broker_rack="${location}-${zone}" + {{- end }} + kafka_server_conf_set "broker.rack" "$broker_rack" + } + {{- end }} + {{- if and $externalAccessEnabled .context.Values.defaultInitContainers.autoDiscovery.enabled }} + # Wait for autodiscovery to finish + retry_while "test -f /shared/external-host.txt -o -f /shared/external-port.txt" || error "Timed out waiting for autodiscovery init-container" + {{- end }} + + cp /configmaps/server.properties $KAFKA_CONF_FILE + + # Get pod ID and role, last and second last fields in the pod name respectively + POD_ID="${MY_POD_NAME##*-}" + POD_ROLE="${MY_POD_NAME%-*}"; POD_ROLE="${POD_ROLE##*-}" + + # Configure node.id + ID=$((POD_ID + KAFKA_MIN_ID)) + [[ -f "/drycc/kafka/data/meta.properties" ]] && ID="$(grep "node.id" /drycc/kafka/data/meta.properties | awk -F '=' '{print $2}')" + kafka_server_conf_set 
"node.id" "$ID" + # Configure initial controllers + if [[ "controller" =~ "$POD_ROLE" ]]; then + INITIAL_CONTROLLERS=() + for ((i = 0; i < {{ int .context.Values.controller.replicaCount }}; i++)); do + var="KAFKA_CONTROLLER_${i}_DIR_ID"; DIR_ID="${!var}" + [[ $i -eq $POD_ID ]] && [[ -f "/drycc/kafka/data/meta.properties" ]] && DIR_ID="$(grep "directory.id" /drycc/kafka/data/meta.properties | awk -F '=' '{print $2}')" + INITIAL_CONTROLLERS+=("${i}@${KAFKA_FULLNAME}-${POD_ROLE}-${i}.${KAFKA_CONTROLLER_SVC_NAME}.${MY_POD_NAMESPACE}.svc.${CLUSTER_DOMAIN}:${KAFKA_CONTROLLER_PORT}:${DIR_ID}") + done + echo "${INITIAL_CONTROLLERS[*]}" | awk -v OFS=',' '{$1=$1}1' > /shared/initial-controllers.txt + fi + {{- if not .context.Values.listeners.advertisedListeners }} + replace_in_file "$KAFKA_CONF_FILE" "advertised-address-placeholder" "${MY_POD_NAME}.${KAFKA_FULLNAME}-${POD_ROLE}-headless.${MY_POD_NAMESPACE}.svc.${CLUSTER_DOMAIN}" + {{- if $externalAccessEnabled }} + configure_external_access + {{- end }} + {{- end }} + {{- if include "kafka.sslEnabled" .context }} + configure_kafka_tls + {{- end }} + {{- if include "kafka.saslEnabled" .context }} + sasl_env_vars=( + KAFKA_CLIENT_PASSWORDS + KAFKA_INTER_BROKER_PASSWORD + KAFKA_INTER_BROKER_CLIENT_SECRET + KAFKA_CONTROLLER_PASSWORD + KAFKA_CONTROLLER_CLIENT_SECRET + ) + for env_var in "${sasl_env_vars[@]}"; do + file_env_var="${env_var}_FILE" + if [[ -n "${!file_env_var:-}" ]]; then + if [[ -r "${!file_env_var:-}" ]]; then + export "${env_var}=$(< "${!file_env_var}")" + unset "${file_env_var}" + else + warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable." 
+ fi + fi + done + configure_kafka_sasl + {{- end }} + {{- if .context.Values.brokerRackAwareness.enabled }} + configure_kafka_broker_rack + {{- end }} + if [[ -f /secret-config/server-secret.properties ]]; then + cat /secret-config/server-secret.properties >> $KAFKA_CONF_FILE + fi + + {{- include "common.tplvalues.render" ( dict "value" .context.Values.defaultInitContainers.prepareConfig.extraInit "context" .context ) | nindent 6 }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .context.Values.image.debug .context.Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KAFKA_FULLNAME + value: {{ include "common.names.fullname" .context | quote }} + - name: CLUSTER_DOMAIN + value: {{ .context.Values.clusterDomain | quote }} + - name: KAFKA_VOLUME_DIR + value: {{ $roleValues.persistence.mountPath | quote }} + - name: KAFKA_CONF_FILE + value: /config/server.properties + - name: KAFKA_MIN_ID + value: {{ $roleValues.minId | quote }} + - name: KAFKA_CONTROLLER_SVC_NAME + value: {{ printf "%s-headless" (include "kafka.controller.fullname" .context) | trunc 63 | trimSuffix "-" }} + - name: KAFKA_CONTROLLER_PORT + value: {{ .context.Values.listeners.controller.containerPort | quote }} + {{- $kraftSecret := include "kafka.kraftSecretName" .context }} + {{- range $i := until (int .context.Values.controller.replicaCount) }} + - name: KAFKA_CONTROLLER_{{ $i }}_DIR_ID + valueFrom: + secretKeyRef: + name: {{ $kraftSecret }} + key: controller-{{ $i }}-id + {{- end }} + {{- if $externalAccessEnabled }} + - name: EXTERNAL_ACCESS_LISTENER_NAME + value: {{ upper .context.Values.listeners.external.name | quote }} + {{- $externalAccess := index .context.Values.externalAccess .role }} + {{- if or (eq $externalAccess.service.type "LoadBalancer") (and $externalAccess.service.loadBalancerNames (eq 
$externalAccess.service.type "ClusterIP")) }} + {{- if not .context.Values.defaultInitContainers.autoDiscovery.enabled }} + - name: EXTERNAL_ACCESS_HOSTS_LIST + value: {{ join "," (default $externalAccess.service.loadBalancerIPs $externalAccess.service.loadBalancerNames) | quote }} + {{- end }} + - name: EXTERNAL_ACCESS_PORT + value: {{ $externalAccess.service.ports.external | quote }} + {{- else if eq $externalAccess.service.type "NodePort" }} + {{- if $externalAccess.service.domain }} + - name: EXTERNAL_ACCESS_HOST + value: {{ $externalAccess.service.domain | quote }} + {{- else if and $externalAccess.service.usePodIPs .context.Values.defaultInitContainers.autoDiscovery.enabled }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: EXTERNAL_ACCESS_HOST + value: "$(MY_POD_IP)" + {{- else if or $externalAccess.service.useHostIPs .context.Values.defaultInitContainers.autoDiscovery.enabled }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: EXTERNAL_ACCESS_HOST + value: "$(HOST_IP)" + {{- else if and $externalAccess.service.externalIPs (not .context.Values.defaultInitContainers.autoDiscovery.enabled) }} + - name: EXTERNAL_ACCESS_HOSTS_LIST + value: {{ join "," $externalAccess.service.externalIPs }} + {{- else }} + - name: EXTERNAL_ACCESS_HOST_USE_PUBLIC_IP + value: "true" + {{- end }} + {{- if not .context.Values.defaultInitContainers.autoDiscovery.enabled }} + {{- if and $externalAccess.service.externalIPs (empty $externalAccess.service.nodePorts)}} + - name: EXTERNAL_ACCESS_PORT + value: {{ $externalAccess.service.ports.external | quote }} + {{- else }} + - name: EXTERNAL_ACCESS_PORTS_LIST + value: {{ join "," $externalAccess.service.nodePorts | quote }} + {{- end }} + {{- end }} + {{- else if eq $externalAccess.service.type "ClusterIP" }} + - name: EXTERNAL_ACCESS_HOST + value: {{ $externalAccess.service.domain | quote }} + - name: EXTERNAL_ACCESS_PORT + value: {{ $externalAccess.service.ports.external | 
quote}} + - name: EXTERNAL_ACCESS_PORT_AUTOINCREMENT + value: "true" + {{- end }} + {{- end }} + {{- if include "kafka.saslEnabled" .context }} + {{- include "kafka.saslEnv" .context | nindent 4 }} + {{- end }} + {{- if include "kafka.sslEnabled" .context }} + - name: KAFKA_TLS_TYPE + value: {{ ternary "PEM" "JKS" (or .context.Values.tls.autoGenerated.enabled (eq (upper .context.Values.tls.type) "PEM")) }} + {{- if eq (upper .context.Values.tls.type) "JKS" }} + - name: KAFKA_TLS_KEYSTORE_FILE + value: {{ printf "/mounted-certs/%s" ( default "kafka.keystore.jks" .context.Values.tls.jksKeystoreKey) | quote }} + - name: KAFKA_TLS_TRUSTSTORE_FILE + value: {{ printf "/mounted-certs/%s" ( default "kafka.truststore.jks" .context.Values.tls.jksTruststoreKey) | quote }} + {{- end }} + - name: KAFKA_TLS_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.tlsPasswordsSecretName" .context }} + key: {{ .context.Values.tls.passwordsSecretKeystoreKey | quote }} + - name: KAFKA_TLS_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.tlsPasswordsSecretName" .context }} + key: {{ .context.Values.tls.passwordsSecretTruststoreKey | quote }} + {{- if and (not .context.Values.tls.autoGenerated.enabled) (or .context.Values.tls.keyPassword (and .context.Values.tls.passwordsSecret .context.Values.tls.passwordsSecretPemPasswordKey)) }} + - name: KAFKA_TLS_PEM_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.tlsPasswordsSecretName" .context }} + key: {{ default "key-password" .context.Values.tls.passwordsSecretPemPasswordKey | quote }} + {{- end }} + {{- end }} + volumeMounts: + - name: data + mountPath: /drycc/kafka + - name: kafka-config + mountPath: /config + - name: kafka-configmaps + mountPath: /configmaps + - name: kafka-secret-config + mountPath: /secret-config + - name: tmp + mountPath: /tmp + - name: init-shared + mountPath: /shared + {{- if include "kafka.sslEnabled" .context }} + - name: kafka-shared-certs + mountPath: 
/certs + {{- if and (include "kafka.sslEnabled" .context) (or .context.Values.tls.existingSecret .context.Values.tls.autoGenerated.enabled) }} + - name: kafka-certs + mountPath: /mounted-certs + readOnly: true + {{- end }} + {{- end }} + {{- if and .context.Values.usePasswordFiles (include "kafka.saslEnabled" .context) }} + - name: kafka-sasl + mountPath: /opt/drycc/kafka/config/secrets + readOnly: true + {{- end }} +{{- end -}} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/config-secrets.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/config-secrets.yaml new file mode 100644 index 00000000..6d971948 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/config-secrets.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and (include "kafka.broker.createSecretConfig" .) (gt $replicaCount 0) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-secret-configuration" (include "kafka.broker.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + server-secret.properties: {{ include "kafka.broker.secretConfig" . | b64enc }} +{{- end }} + diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/configmap.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/configmap.yaml new file mode 100644 index 00000000..4fa0a4c9 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/configmap.yaml @@ -0,0 +1,54 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* +Return the Kafka broker configuration. +ref: https://kafka.apache.org/documentation/#configuration +*/}} +{{- define "kafka.broker.config" -}} +{{- if or .Values.config .Values.broker.config }} +{{- include "common.tplvalues.render" (dict "value" (coalesce .Values.broker.config .Values.config) "context" .) }} +{{- else }} +# Listeners configuration +listeners: {{ include "kafka.listeners" (dict "isController" false "context" .) }} +listener.security.protocol.map: {{ include "kafka.securityProtocolMap" (dict "isController" false "context" .) }} +advertised.listeners: {{ include "kafka.advertisedListeners" . }} +inter.broker.listener.name: {{ .Values.listeners.interbroker.name }} +# Kafka data logs directory +log.dir: {{ printf "%s/data" .Values.broker.persistence.mountPath }} +# Kafka application logs directory +logs.dir: {{ .Values.broker.logPersistence.mountPath }} +# KRaft node role +process.roles: broker +# Common Kafka Configuration +{{ include "kafka.commonConfig" . }} +{{- end -}} +{{- end -}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and (include "kafka.broker.createConfigmap" .) (gt $replicaCount 0) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "kafka.broker.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $configuration := include "kafka.broker.config" . 
| fromYaml -}} + {{- if or .Values.overrideConfiguration .Values.broker.overrideConfiguration }} + {{- $overrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.overrideConfiguration "context" .) | fromYaml }} + {{- $brokerOverrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.broker.overrideConfiguration "context" .) | fromYaml }} + {{- $configuration = mustMergeOverwrite $configuration $overrideConfiguration $brokerOverrideConfiguration }} + {{- end }} + server.properties: |- + {{- range $key, $value := $configuration }} + {{ $key }}={{ include "common.tplvalues.render" (dict "value" $value "context" $) }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/hpa.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/hpa.yaml new file mode 100644 index 00000000..8fc187fc --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/hpa.yaml @@ -0,0 +1,43 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.broker.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "kafka.broker.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if or .Values.broker.autoscaling.hpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.autoscaling.hpa.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ template "kafka.broker.fullname" . }} + minReplicas: {{ .Values.broker.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.broker.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.broker.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.broker.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.broker.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.broker.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/networkpolicy.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/networkpolicy.yaml new file mode 100644 index 00000000..784cb047 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/networkpolicy.yaml @@ -0,0 +1,113 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and .Values.networkPolicy.enabled (or (gt $replicaCount 0) .Values.broker.autoscaling.hpa.enabled) }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "kafka.broker.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow internal communications between nodes + - ports: + - port: {{ .Values.listeners.client.containerPort }} + - port: {{ .Values.listeners.interbroker.containerPort }} + {{- range $listener := .Values.listeners.extraListeners }} + - port: {{ $listener.containerPort }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + - port: {{ .Values.listeners.external.containerPort }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + {{- if eq .Values.service.type "LoadBalancer" }} + - {} + {{- else }} + # Allow client connections + - ports: + - port: {{ .Values.listeners.client.containerPort }} + - port: {{ .Values.listeners.interbroker.containerPort }} + {{- range $listener := .Values.listeners.extraListeners }} + 
- port: {{ $listener.containerPort }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + - port: {{ .Values.listeners.external.containerPort }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - port: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- end }} + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.addExternalClientAccess }} + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.networkPolicy.ingressPodMatchLabels }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSMatchLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/pdb.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/pdb.yaml new file mode 100644 index 00000000..abc3ae65 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/pdb.yaml @@ -0,0 +1,30 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.broker.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "kafka.broker.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.broker.pdb.minAvailable }} + minAvailable: {{ .Values.broker.pdb.minAvailable }} + {{- end }} + {{- if or .Values.broker.pdb.maxUnavailable (not .Values.broker.pdb.minAvailable) }} + maxUnavailable: {{ .Values.broker.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/statefulset.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/statefulset.yaml new file mode 100644 index 00000000..cd781cd3 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/statefulset.yaml @@ -0,0 +1,415 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if or (gt $replicaCount 0) .Values.broker.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "kafka.broker.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: {{ .Values.broker.podManagementPolicy }} + {{- if not .Values.broker.autoscaling.hpa.enabled }} + replicas: {{ .Values.broker.replicaCount }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + serviceName: {{ printf "%s-headless" (include "kafka.broker.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.broker.updateStrategy "context" $ ) | nindent 4 }} + {{- if .Values.broker.minReadySeconds }} + minReadySeconds: {{ .Values.broker.minReadySeconds }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + annotations: + {{- if include "kafka.broker.createConfigmap" . }} + checksum/configuration: {{ include (print $.Template.BasePath "/broker/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "kafka.createSaslSecret" .) (not .Values.existingKraftSecret) }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if include "kafka.createTlsSecret" . }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secret.yaml") . 
| sha256sum }} + {{- end }} + {{- if include "kafka.metrics.jmx.createConfigmap" . }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/metrics/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.broker.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.broker.automountServiceAccountToken }} + {{- if .Values.broker.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.broker.hostAliases "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.broker.hostNetwork }} + hostIPC: {{ .Values.broker.hostIPC }} + {{- if .Values.broker.schedulerName }} + schedulerName: {{ .Values.broker.schedulerName | quote }} + {{- end }} + {{- if .Values.broker.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.broker.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAffinityPreset "component" "broker" "customLabels" $podLabels "topologyKey" .Values.broker.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAntiAffinityPreset "component" "broker" "customLabels" $podLabels "topologyKey" .Values.broker.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.broker.nodeAffinityPreset.type "key" .Values.broker.nodeAffinityPreset.key "values" .Values.broker.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.broker.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.broker.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.broker.tolerations }} + tolerations: {{- include 
"common.tplvalues.render" (dict "value" .Values.broker.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.broker.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.broker.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.broker.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.broker.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.broker.priorityClassName }} + priorityClassName: {{ .Values.broker.priorityClassName }} + {{- end }} + {{- if .Values.broker.runtimeClassName }} + runtimeClassName: {{ .Values.broker.runtimeClassName }} + {{- end }} + {{- if .Values.broker.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.broker.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kafka.serviceAccountName" . }} + enableServiceLinks: {{ .Values.broker.enableServiceLinks }} + initContainers: + {{- if and .Values.defaultInitContainers.volumePermissions.enabled .Values.broker.persistence.enabled }} + {{- include "kafka.defaultInitContainers.volumePermissions" (dict "context" . "role" "broker") | nindent 8 }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.defaultInitContainers.autoDiscovery.enabled }} + {{- include "kafka.defaultInitContainers.autoDiscovery" (dict "context" . "role" "broker") | nindent 8 }} + {{- end }} + {{- include "kafka.defaultInitContainers.prepareConfig" (dict "context" . 
"role" "broker") | nindent 8 }} + {{- if .Values.broker.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.broker.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.broker.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.broker.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.broker.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.broker.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.broker.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.broker.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: KAFKA_HEAP_OPTS + value: {{ coalesce .Values.broker.heapOpts .Values.heapOpts | quote }} + - name: KAFKA_CFG_PROCESS_ROLES + value: broker + {{- include "kafka.commonEnv" . 
| nindent 12 }} + {{- if .Values.broker.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.broker.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.broker.extraEnvVarsCM .Values.extraEnvVarsCM .Values.broker.extraEnvVarsSecret .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.broker.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.broker.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.broker.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.broker.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.listeners.client.containerPort }} + - name: interbroker + containerPort: {{ .Values.listeners.interbroker.containerPort }} + {{- if .Values.externalAccess.enabled }} + - name: external + containerPort: {{ .Values.listeners.external.containerPort }} + {{- end }} + {{- if .Values.listeners.extraListeners }} + {{- include "kafka.extraListeners.containerPorts" . 
| nindent 12 }} + {{- end }} + {{- if .Values.broker.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.broker.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.broker.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.broker.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.broker.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - pgrep + - -f + - kafka + {{- end }} + {{- if .Values.broker.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.broker.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.broker.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.broker.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "client" + {{- end }} + {{- if .Values.broker.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.broker.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.broker.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.broker.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "client" + {{- end }} + {{- end }} + {{- if .Values.broker.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.broker.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.broker.resources }} + resources: {{- toYaml .Values.broker.resources | nindent 12 }} + {{- else if ne .Values.broker.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.broker.resourcesPreset) | nindent 12 }} + 
{{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.broker.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.broker.logPersistence.mountPath }} + - name: kafka-config + mountPath: /opt/drycc/kafka/config/server.properties + subPath: server.properties + - name: tmp + mountPath: /tmp + {{- if or .Values.log4j2 .Values.existingLog4j2ConfigMap }} + - name: log4j2-config + mountPath: /opt/drycc/kafka/config/log4j2.yaml + subPath: log4j2.yaml + {{- end }} + {{- if include "kafka.sslEnabled" . }} + - name: kafka-shared-certs + mountPath: /opt/drycc/kafka/config/certs + readOnly: true + {{- end }} + {{- if and .Values.usePasswordFiles (include "kafka.saslEnabled" .) }} + - name: kafka-sasl + mountPath: /opt/drycc/kafka/config/secrets + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.broker.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ include "kafka.metrics.jmx.image" . 
}} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.jmx.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - java + args: + - -XX:MaxRAMPercentage=100 + - -XshowSettings:vm + - -jar + - jmx_prometheus_standalone.jar + - {{ .Values.metrics.jmx.containerPorts.metrics | quote }} + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- else if ne .Values.metrics.jmx.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.metrics.jmx.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.jmx.livenessProbe "enabled" | toYaml | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.jmx.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.jmx.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.broker.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars 
"context" $) | nindent 8 }} + {{- end }} + volumes: + - name: kafka-configmaps + configMap: + name: {{ include "kafka.broker.configmapName" . }} + - name: kafka-secret-config + {{- if (include "kafka.broker.secretConfigExists" .) }} + secret: + secretName: {{ include "kafka.broker.secretConfigName" . }} + {{- else }} + emptyDir: {} + {{- end }} + - name: kafka-config + emptyDir: {} + - name: tmp + emptyDir: {} + - name: init-shared + emptyDir: {} + {{- if or .Values.log4j2 .Values.existingLog4j2ConfigMap }} + - name: log4j2-config + configMap: + name: {{ include "kafka.log4j2.configMapName" . }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if include "kafka.sslEnabled" . }} + - name: kafka-shared-certs + emptyDir: {} + {{- if or .Values.tls.existingSecret .Values.tls.autoGenerated.enabled }} + - name: kafka-certs + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "kafka.tlsSecretName" . }} + {{- if .Values.tls.jksTruststoreSecret }} + - secret: + name: {{ .Values.tls.jksTruststoreSecret }} + {{- end }} + {{- end }} + {{- end }} + {{- if and .Values.usePasswordFiles (include "kafka.saslEnabled" .) }} + - name: kafka-sasl + projected: + sources: + - secret: + name: {{ include "kafka.saslSecretName" . }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.broker.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.broker.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.broker.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.broker.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.broker.persistence.existingClaim .) 
}} + {{- end }} + {{- if not .Values.broker.logPersistence.enabled }} + - name: logs + emptyDir: {} + {{- else if .Values.broker.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.broker.logPersistence.existingClaim .) }} + {{- end }} + {{- if or (and .Values.broker.persistence.enabled (not .Values.broker.persistence.existingClaim)) (and .Values.broker.logPersistence.enabled (not .Values.broker.logPersistence.existingClaim)) }} + {{- if .Values.broker.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.broker.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.broker.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + {{- if and .Values.broker.persistence.enabled (not .Values.broker.persistence.existingClaim) }} + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + {{- if .Values.broker.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.broker.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.broker.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.broker.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.broker.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.broker.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.broker.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.broker.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.broker.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- if and .Values.broker.logPersistence.enabled (not .Values.broker.logPersistence.existingClaim) }} + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: logs + {{- if .Values.broker.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.broker.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.broker.logPersistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.broker.logPersistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.broker.logPersistence "global" .Values.global) | nindent 8 }} + {{- if .Values.broker.logPersistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.broker.logPersistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-external-access.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-external-access.yaml new file mode 100644 index 00000000..d15a41a8 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-external-access.yaml @@ -0,0 +1,75 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.externalAccess.enabled }} +{{- $fullname := include "kafka.broker.fullname" . 
}} +{{- $replicaCount := .Values.broker.replicaCount | int }} +{{- range $i := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullname) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d-external" $fullname $i | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" $ | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.broker.service.labels $.Values.commonLabels ) "context" $ ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if or $.Values.externalAccess.broker.service.annotations $.Values.commonAnnotations $.Values.externalAccess.broker.service.loadBalancerAnnotations }} + annotations: + {{- if and (not (empty $.Values.externalAccess.broker.service.loadBalancerAnnotations)) (eq (len $.Values.externalAccess.broker.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $.Values.externalAccess.broker.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }} + {{- end }} + {{- if or $.Values.externalAccess.broker.service.annotations $.Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.broker.service.annotations $.Values.commonAnnotations ) "context" $ ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $.Values.externalAccess.broker.service.type }} + {{- if eq $.Values.externalAccess.broker.service.type "LoadBalancer" }} + allocateLoadBalancerNodePorts: {{ $.Values.externalAccess.broker.service.allocateLoadBalancerNodePorts }} + {{- if (not (empty $.Values.externalAccess.broker.service.loadBalancerClass)) }} + loadBalancerClass: {{ 
$.Values.externalAccess.broker.service.loadBalancerClass }} + {{- end }} + {{- if and (not (empty $.Values.externalAccess.broker.service.loadBalancerIPs)) (eq (len $.Values.externalAccess.broker.service.loadBalancerIPs) $replicaCount) }} + loadBalancerIP: {{ index $.Values.externalAccess.broker.service.loadBalancerIPs $i }} + {{- end }} + {{- if $.Values.externalAccess.broker.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $.Values.externalAccess.broker.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + publishNotReadyAddresses: {{ $.Values.externalAccess.broker.service.publishNotReadyAddresses }} + ports: + - name: tcp-kafka + port: {{ $.Values.externalAccess.broker.service.ports.external }} + {{- if le (add $i 1) (len $.Values.externalAccess.broker.service.nodePorts) }} + nodePort: {{ index $.Values.externalAccess.broker.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: external + {{- if $.Values.externalAccess.broker.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $.Values.externalAccess.broker.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $.Values.externalAccess.broker.service.type "NodePort") (le (add $i 1) (len $.Values.externalAccess.broker.service.externalIPs)) }} + externalIPs: [{{ index $.Values.externalAccess.broker.service.externalIPs $i | quote }}] + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.broker.podLabels $.Values.commonLabels ) "context" $ ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + app.kubernetes.io/component: broker + statefulset.kubernetes.io/pod-name: {{ $targetPod }} + {{- with $.Values.externalAccess.broker.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ . 
| quote }} + {{- end }} + {{- with $.Values.externalAccess.broker.service.ipFamilies }} + ipFamilies: + {{- . | toYaml | nindent 2 }} + {{- end }} +--- +{{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-headless.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-headless.yaml new file mode 100644 index 00000000..b929342a --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/svc-headless.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if gt $replicaCount 0 }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "kafka.broker.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.externalAccess.broker.service.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if or .Values.service.headless.broker.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.broker.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-interbroker + port: {{ .Values.service.ports.interbroker }} + protocol: TCP + targetPort: interbroker + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: client + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- with .Values.service.headless.ipFamilyPolicy }} + ipFamilyPolicy: {{ . | quote }} + {{- end }} + {{- with .Values.service.headless.ipFamilies }} + ipFamilies: + {{- . | toYaml | nindent 2 }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/broker/vpa.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/vpa.yaml new file mode 100644 index 00000000..4e830922 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/broker/vpa.yaml @@ -0,0 +1,46 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.broker.replicaCount }} +{{- if and (gt $replicaCount 0) (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.broker.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ template "kafka.broker.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: broker + app.kubernetes.io/part-of: kafka + {{- if or .Values.broker.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.broker.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: kafka + {{- with .Values.broker.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.broker.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.broker.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ template "kafka.broker.fullname" . }} + {{- if .Values.broker.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.broker.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/ca-cert.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/ca-cert.yaml new file mode 100644 index 00000000..ecf626f0 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/ca-cert.yaml @@ -0,0 +1,53 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if include "kafka.createCertificate" . }} +{{- if empty .Values.tls.autoGenerated.certManager.existingIssuer }} +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ printf "%s-clusterissuer" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selfSigned: {} +--- +{{- end }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ printf "%s-ca-crt" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + secretName: {{ printf "%s-ca-crt" (include "common.names.fullname" .) }} + commonName: {{ printf "%s-root-ca" (include "common.names.fullname" .) }} + isCA: true + issuerRef: + name: {{ default (printf "%s-clusterissuer" (include "common.names.fullname" .)) .Values.tls.autoGenerated.certManager.existingIssuer }} + kind: {{ default "Issuer" .Values.tls.autoGenerated.certManager.existingIssuerKind }} +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ printf "%s-ca-issuer" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + ca: + secretName: {{ printf "%s-ca-crt" (include "common.names.fullname" .) 
}} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/cert.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/cert.yaml new file mode 100644 index 00000000..f04f770d --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/cert.yaml @@ -0,0 +1,56 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if include "kafka.createCertificate" . }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ printf "%s-crt" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + secretName: {{ include "kafka.tlsSecretName" . }} + commonName: {{ printf "%s.%s.svc.%s" (include "common.names.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain }} + issuerRef: + name: {{ printf "%s-ca-issuer" (include "common.names.fullname" .) }} + kind: Issuer + subject: + organizations: + - "Kafka" + dnsNames: + {{- $controllerSvcName := printf "%s-headless" (include "kafka.controller.fullname" .) | trunc 63 | trimSuffix "-" }} + {{- $brokerSvcName := printf "%s-headless" (include "kafka.broker.fullname" .) | trunc 63 | trimSuffix "-" }} + - '*.{{ include "common.names.namespace" . }}' + - '*.{{ include "common.names.namespace" . }}.svc' + - '*.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}' + - '*.{{ $controllerSvcName }}' + - '*.{{ $controllerSvcName }}.{{ include "common.names.namespace" . }}' + - '*.{{ $controllerSvcName }}.{{ include "common.names.namespace" . }}.svc' + - '*.{{ $controllerSvcName }}.{{ include "common.names.namespace" . 
}}.svc.{{ .Values.clusterDomain }}' + - '*.{{ $brokerSvcName }}' + - '*.{{ $brokerSvcName }}.{{ include "common.names.namespace" . }}' + - '*.{{ $brokerSvcName }}.{{ include "common.names.namespace" . }}.svc' + - '*.{{ $brokerSvcName }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}' + {{- if .Values.externalAccess.enabled -}} + {{- with .Values.externalAccess.broker.service.domain }} + - '*.{{ . }}' + {{- end }} + {{- with .Values.externalAccess.controller.service.domain }} + - '*.{{ . }}' + {{- end }} + {{- end }} + {{- range .Values.tls.autoGenerated.customAltNames }} + - '{{ . }}' + {{- end }} + privateKey: + algorithm: {{ .Values.tls.autoGenerated.certManager.keyAlgorithm }} + size: {{ int .Values.tls.autoGenerated.certManager.keySize }} + duration: {{ .Values.tls.autoGenerated.certManager.duration }} + renewBefore: {{ .Values.tls.autoGenerated.certManager.renewBefore }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/config-secrets.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/config-secrets.yaml new file mode 100644 index 00000000..7c8ec908 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/config-secrets.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and (include "kafka.controller.createSecretConfig" .) (gt $replicaCount 0) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-secret-configuration" (include "kafka.controller.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + server-secret.properties: {{ include "kafka.controller.secretConfig" . | b64enc }} +{{- end }} + diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/configmap.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/configmap.yaml new file mode 100644 index 00000000..ce65ba47 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/configmap.yaml @@ -0,0 +1,56 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* +Return the Kafka controller configuration. +ref: https://kafka.apache.org/documentation/#configuration +*/}} +{{- define "kafka.controller.config" -}} +{{- if or .Values.config .Values.controller.config }} +{{- include "common.tplvalues.render" (dict "value" (coalesce .Values.controller.config .Values.config) "context" .) }} +{{- else }} +# Listeners configuration +listeners: {{ include "kafka.listeners" (dict "isController" true "context" .) }} +listener.security.protocol.map: {{ include "kafka.securityProtocolMap" (dict "isController" true "context" .) }} +{{- if not .Values.controller.controllerOnly }} +advertised.listeners: {{ include "kafka.advertisedListeners" . 
}} +inter.broker.listener.name: {{ .Values.listeners.interbroker.name }} +{{- end }} +# Kafka data logs directory +log.dir: {{ printf "%s/data" .Values.controller.persistence.mountPath }} +# Kafka application logs directory +logs.dir: {{ .Values.controller.logPersistence.mountPath }} +# KRaft node role +process.roles: {{ ternary "controller" "controller,broker" .Values.controller.controllerOnly }} +# Common Kafka Configuration +{{ include "kafka.commonConfig" . }} +{{- end -}} +{{- end -}} + +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and (include "kafka.controller.createConfigmap" .) (gt $replicaCount 0) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "kafka.controller.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $configuration := include "kafka.controller.config" . | fromYaml -}} + {{- if or .Values.overrideConfiguration .Values.controller.overrideConfiguration }} + {{- $overrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.overrideConfiguration "context" .) | fromYaml }} + {{- $controllerOverrideConfiguration := include "common.tplvalues.render" (dict "value" .Values.controller.overrideConfiguration "context" .) 
| fromYaml }} + {{- $configuration = mustMergeOverwrite $configuration $overrideConfiguration $controllerOverrideConfiguration }} + {{- end }} + server.properties: |- + {{- range $key, $value := $configuration }} + {{ $key }}={{ include "common.tplvalues.render" (dict "value" $value "context" $) }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/hpa.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/hpa.yaml new file mode 100644 index 00000000..84c152df --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/hpa.yaml @@ -0,0 +1,43 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.controller.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "kafka.controller.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if or .Values.controller.autoscaling.hpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.autoscaling.hpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ template "kafka.controller.fullname" . 
}} + minReplicas: {{ .Values.controller.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.controller.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.controller.autoscaling.hpa.targetCPU }} + {{- end }} + {{- if .Values.controller.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.controller.autoscaling.hpa.targetMemory }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/networkpolicy.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/networkpolicy.yaml new file mode 100644 index 00000000..1df41987 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/networkpolicy.yaml @@ -0,0 +1,119 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $replicaCount := int .Values.controller.replicaCount }} +{{- if and .Values.networkPolicy.enabled (or (gt $replicaCount 0) .Values.controller.autoscaling.hpa.enabled) }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "kafka.controller.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow internal communications between nodes + - ports: + - port: {{ .Values.listeners.controller.containerPort }} + {{- if not .Values.controller.controllerOnly }} + - port: {{ .Values.listeners.client.containerPort }} + - port: {{ .Values.listeners.interbroker.containerPort }} + {{- range $listener := .Values.listeners.extraListeners }} + - port: {{ $listener.containerPort }} + {{- end }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + - port: {{ .Values.listeners.external.containerPort }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + {{- if eq .Values.service.type "LoadBalancer" }} + - {} + {{- else }} + # Allow client connections + - ports: + - port: {{ .Values.listeners.controller.containerPort }} + {{- if not .Values.controller.controllerOnly }} + - port: {{ .Values.listeners.client.containerPort }} + - port: {{ .Values.listeners.interbroker.containerPort }} + {{- range $listener := .Values.listeners.extraListeners }} + - port: {{ $listener.containerPort }} + {{- end }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + - port: {{ .Values.listeners.external.containerPort }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - port: {{ .Values.metrics.jmx.containerPorts.metrics }} 
+ {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + {{- if or .Values.networkPolicy.allowCurrentNamespace .Values.networkPolicy.allowNamespaces }} + {{- if .Values.networkPolicy.allowCurrentNamespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + {{- end }} + {{- range $namespace := .Values.networkPolicy.allowNamespaces }} + {{- if $namespace }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ $namespace }} + {{- end }} + {{- end }} + {{- end }} + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if and .Values.networkPolicy.addExternalClientAccess (not .Values.controller.controllerOnly) }} + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- end }} + {{- if .Values.networkPolicy.ingressPodMatchLabels }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSMatchLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/pdb.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/pdb.yaml new file mode 100644 index
00000000..382fc2b6 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/pdb.yaml @@ -0,0 +1,30 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.controller.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "kafka.controller.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.controller.pdb.minAvailable }} + minAvailable: {{ .Values.controller.pdb.minAvailable }} + {{- end }} + {{- if or .Values.controller.pdb.maxUnavailable (not .Values.controller.pdb.minAvailable) }} + maxUnavailable: {{ .Values.controller.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/statefulset.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/statefulset.yaml new file mode 100644 index 00000000..3c960ac8 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/statefulset.yaml @@ -0,0 +1,426 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "kafka.controller.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: {{ .Values.controller.podManagementPolicy }} + {{- if not .Values.controller.autoscaling.hpa.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + serviceName: {{ printf "%s-headless" (include "kafka.controller.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.controller.updateStrategy "context" $ ) | nindent 4 }} + {{- if .Values.controller.minReadySeconds }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + annotations: + {{- if include "kafka.controller.createConfigmap" . }} + checksum/configuration: {{ include (print $.Template.BasePath "/controller-eligible/configmap.yaml") . 
| sha256sum }} + {{- end }} + {{- if or (include "kafka.createSaslSecret" .) (not .Values.existingKraftSecret) }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if include "kafka.createTlsSecret" . }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if include "kafka.metrics.jmx.createConfigmap" . }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/metrics/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.controller.automountServiceAccountToken }} + {{- if .Values.controller.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.controller.hostAliases "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + hostIPC: {{ .Values.controller.hostIPC }} + {{- if .Values.controller.schedulerName }} + schedulerName: {{ .Values.controller.schedulerName | quote }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.controller.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAffinityPreset "component" "controller-eligible" "customLabels" $podLabels "topologyKey" .Values.controller.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAntiAffinityPreset "component" "controller-eligible" "customLabels" $podLabels "topologyKey" .Values.controller.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include 
"common.affinities.nodes" (dict "type" .Values.controller.nodeAffinityPreset.type "key" .Values.controller.nodeAffinityPreset.key "values" .Values.controller.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.controller.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.controller.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.controller.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if .Values.controller.runtimeClassName }} + runtimeClassName: {{ .Values.controller.runtimeClassName }} + {{- end }} + {{- if .Values.controller.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.controller.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kafka.serviceAccountName" . 
}} + enableServiceLinks: {{ .Values.controller.enableServiceLinks }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dnsConfig "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- if and .Values.defaultInitContainers.volumePermissions.enabled .Values.controller.persistence.enabled }} + {{- include "kafka.defaultInitContainers.volumePermissions" (dict "context" . "role" "controller") | nindent 8 }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.defaultInitContainers.autoDiscovery.enabled (or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly)) }} + {{- include "kafka.defaultInitContainers.autoDiscovery" (dict "context" . "role" "controller") | nindent 8 }} + {{- end }} + {{- include "kafka.defaultInitContainers.prepareConfig" (dict "context" . "role" "controller") | nindent 8 }} + {{- if .Values.controller.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.controller.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.controller.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.controller.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.controller.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.controller.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.controller.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.controller.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: KAFKA_HEAP_OPTS + value: {{ coalesce .Values.controller.heapOpts .Values.heapOpts | quote }} + - name: KAFKA_CFG_PROCESS_ROLES + value: {{ ternary "controller" "controller,broker" .Values.controller.controllerOnly | quote }} + - name: KAFKA_INITIAL_CONTROLLERS_FILE + value: /shared/initial-controllers.txt + {{- include "kafka.commonEnv" . 
| nindent 12 }} + {{- if .Values.controller.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.controller.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.controller.extraEnvVarsCM .Values.extraEnvVarsCM .Values.controller.extraEnvVarsSecret .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.controller.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.controller.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.controller.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.controller.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + - name: controller + containerPort: {{ .Values.listeners.controller.containerPort }} + {{- if not .Values.controller.controllerOnly }} + - name: client + containerPort: {{ .Values.listeners.client.containerPort }} + - name: interbroker + containerPort: {{ .Values.listeners.interbroker.containerPort }} + {{- if .Values.externalAccess.enabled }} + - name: external + containerPort: {{ .Values.listeners.external.containerPort }} + {{- end }} + {{- if and .Values.listeners.extraListeners (not .Values.controller.controllerOnly) }} + {{- include "kafka.extraListeners.containerPorts" . 
| nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.controller.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.controller.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.controller.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.controller.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - pgrep + - -f + - kafka + {{- end }} + {{- if .Values.controller.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.controller.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.controller.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.controller.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "controller" + {{- end }} + {{- if .Values.controller.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.controller.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.controller.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.controller.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: "controller" + {{- end }} + {{- end }} + {{- if .Values.controller.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.controller.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{- toYaml .Values.controller.resources | nindent 12 }} + {{- else if ne .Values.controller.resourcesPreset "none" }} + resources: {{- 
include "common.resources.preset" (dict "type" .Values.controller.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.controller.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.controller.logPersistence.mountPath }} + - name: kafka-config + mountPath: /opt/drycc/kafka/config/server.properties + subPath: server.properties + - name: tmp + mountPath: /tmp + - name: init-shared + mountPath: /shared + {{- if or .Values.log4j2 .Values.existingLog4j2ConfigMap }} + - name: log4j2-config + mountPath: /opt/drycc/kafka/config/log4j2.yaml + subPath: log4j2.yaml + {{- end }} + {{- if include "kafka.sslEnabled" . }} + - name: kafka-shared-certs + mountPath: /opt/drycc/kafka/config/certs + readOnly: true + {{- end }} + {{- if and .Values.usePasswordFiles (include "kafka.saslEnabled" .) }} + - name: kafka-sasl + mountPath: /opt/drycc/kafka/config/secrets + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ include "kafka.metrics.jmx.image" . 
}} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.metrics.jmx.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - init-stack + - java + args: + - -XX:MaxRAMPercentage=100 + - -XshowSettings:vm + - -jar + - jmx_prometheus_standalone.jar + - {{ .Values.metrics.jmx.containerPorts.metrics | quote }} + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- else if ne .Values.metrics.jmx.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.metrics.jmx.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.jmx.livenessProbe "enabled" | toYaml | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.jmx.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.jmx.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.controller.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" 
.Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: kafka-configmaps + configMap: + name: {{ include "kafka.controller.configmapName" . }} + - name: kafka-secret-config + {{- if (include "kafka.controller.secretConfigExists" .) }} + secret: + secretName: {{ include "kafka.controller.secretConfigName" . }} + {{- else }} + emptyDir: {} + {{- end }} + - name: kafka-config + emptyDir: {} + - name: tmp + emptyDir: {} + - name: init-shared + emptyDir: {} + {{- if or .Values.log4j2 .Values.existingLog4j2ConfigMap }} + - name: log4j2-config + configMap: + name: {{ include "kafka.log4j2.configMapName" . }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if include "kafka.sslEnabled" . }} + - name: kafka-shared-certs + emptyDir: {} + {{- if or .Values.tls.existingSecret .Values.tls.autoGenerated.enabled }} + - name: kafka-certs + projected: + defaultMode: 256 + sources: + - secret: + name: {{ include "kafka.tlsSecretName" . }} + {{- if .Values.tls.jksTruststoreSecret }} + - secret: + name: {{ .Values.tls.jksTruststoreSecret }} + {{- end }} + {{- end }} + {{- end }} + {{- if and .Values.usePasswordFiles (include "kafka.saslEnabled" .) }} + - name: kafka-sasl + projected: + sources: + - secret: + name: {{ include "kafka.saslSecretName" . }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.controller.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.controller.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.controller.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.controller.persistence.existingClaim .) 
}} + {{- end }} + {{- if not .Values.controller.logPersistence.enabled }} + - name: logs + emptyDir: {} + {{- else if .Values.controller.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.controller.logPersistence.existingClaim .) }} + {{- end }} + {{- if or (and .Values.controller.persistence.enabled (not .Values.controller.persistence.existingClaim)) (and .Values.controller.logPersistence.enabled (not .Values.controller.logPersistence.existingClaim)) }} + {{- if .Values.controller.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.controller.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.controller.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + {{- if and .Values.controller.persistence.enabled (not .Values.controller.persistence.existingClaim) }} + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + {{- if .Values.controller.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.controller.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.controller.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.controller.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.controller.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.controller.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.controller.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.controller.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.controller.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- if and .Values.controller.logPersistence.enabled (not .Values.controller.logPersistence.existingClaim) }} + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: logs + {{- if .Values.controller.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.controller.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.controller.logPersistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.controller.logPersistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.controller.logPersistence "global" .Values.global) | nindent 8 }} + {{- if .Values.controller.logPersistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.controller.logPersistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-external-access.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-external-access.yaml new file mode 100644 index 00000000..08c1a5e9 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-external-access.yaml @@ -0,0 +1,77 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.externalAccess.enabled }} +{{- $fullname := include "kafka.controller.fullname" . 
}} +{{- if or .Values.externalAccess.controller.forceExpose (not .Values.controller.controllerOnly)}} +{{- $replicaCount := .Values.controller.replicaCount | int }} +{{- range $i := until $replicaCount }} +{{- $targetPod := printf "%s-%d" $fullname $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d-external" $fullname $i | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" $ | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.controller.service.labels $.Values.commonLabels ) "context" $ ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if or $.Values.externalAccess.controller.service.annotations $.Values.commonAnnotations $.Values.externalAccess.controller.service.loadBalancerAnnotations }} + annotations: + {{- if and (not (empty $.Values.externalAccess.controller.service.loadBalancerAnnotations)) (eq (len $.Values.externalAccess.controller.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $.Values.externalAccess.controller.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }} + {{- end }} + {{- if or $.Values.externalAccess.controller.service.annotations $.Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.controller.service.annotations $.Values.commonAnnotations ) "context" $ ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $.Values.externalAccess.controller.service.type }} + {{- if eq $.Values.externalAccess.controller.service.type "LoadBalancer" }} + allocateLoadBalancerNodePorts: {{ 
$.Values.externalAccess.controller.service.allocateLoadBalancerNodePorts }} + {{- if (not (empty $.Values.externalAccess.controller.service.loadBalancerClass)) }} + loadBalancerClass: {{ $.Values.externalAccess.controller.service.loadBalancerClass }} + {{- end }} + {{- if and (not (empty $.Values.externalAccess.controller.service.loadBalancerIPs)) (eq (len $.Values.externalAccess.controller.service.loadBalancerIPs) $replicaCount) }} + loadBalancerIP: {{ index $.Values.externalAccess.controller.service.loadBalancerIPs $i }} + {{- end }} + {{- if $.Values.externalAccess.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $.Values.externalAccess.controller.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + publishNotReadyAddresses: {{ $.Values.externalAccess.controller.service.publishNotReadyAddresses }} + ports: + - name: tcp-kafka + port: {{ $.Values.externalAccess.controller.service.ports.external }} + {{- if le (add $i 1) (len $.Values.externalAccess.controller.service.nodePorts) }} + nodePort: {{ index $.Values.externalAccess.controller.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: external + {{- if $.Values.externalAccess.controller.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $.Values.externalAccess.controller.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $.Values.externalAccess.controller.service.type "NodePort") (le (add $i 1) (len $.Values.externalAccess.controller.service.externalIPs)) }} + externalIPs: [{{ index $.Values.externalAccess.controller.service.externalIPs $i | quote }}] + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.controller.podLabels $.Values.commonLabels ) "context" $ ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + 
app.kubernetes.io/component: controller-eligible + statefulset.kubernetes.io/pod-name: {{ $targetPod }} + {{- with $.Values.externalAccess.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ . | quote }} + {{- end }} + {{- with $.Values.externalAccess.controller.service.ipFamilies }} + ipFamilies: + {{- . | toYaml | nindent 2 }} + {{- end }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-headless.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-headless.yaml new file mode 100644 index 00000000..9ab5a12a --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/svc-headless.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "kafka.controller.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.controller.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if or .Values.service.headless.controller.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.controller.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + {{- if not .Values.controller.controllerOnly }} + - name: tcp-interbroker + port: {{ .Values.service.ports.interbroker }} + protocol: TCP + targetPort: interbroker + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: client + {{- end }} + - name: tcp-controller + protocol: TCP + port: {{ .Values.service.ports.controller }} + targetPort: controller + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- with .Values.service.headless.ipFamilyPolicy }} + ipFamilyPolicy: {{ . | quote }} + {{- end }} + {{- with .Values.service.headless.ipFamilies }} + ipFamilies: + {{- . | toYaml | nindent 2 }} + {{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/vpa.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/vpa.yaml new file mode 100644 index 00000000..2a33c177 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/controller-eligible/vpa.yaml @@ -0,0 +1,45 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.apiVersions.has" ( dict "version" "autoscaling.k8s.io/v1/VerticalPodAutoscaler" "context" . )) .Values.controller.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ template "kafka.controller.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: controller-eligible + app.kubernetes.io/part-of: kafka + {{- if or .Values.controller.autoscaling.vpa.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.controller.autoscaling.vpa.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: kafka + {{- with .Values.controller.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ (include "common.capabilities.statefulset.apiVersion" .) }} + kind: StatefulSet + name: {{ template "kafka.controller.fullname" . }} + {{- if .Values.controller.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.controller.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/extra-list.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/extra-list.yaml new file mode 100644 index 00000000..9570df4a --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/log4j2-configmap.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/log4j2-configmap.yaml new file mode 100644 index 00000000..c4f9f5ee --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/log4j2-configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.log4j2 (not .Values.existingLog4j2ConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-log4j2-configuration" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j2.yaml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.log4j2 "context" $ ) | nindent 4 }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-configmap.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-configmap.yaml new file mode 100644 index 00000000..5ecdee30 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-configmap.yaml @@ -0,0 +1,70 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if include "kafka.metrics.jmx.createConfigmap" . }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + jmx-kafka-prometheus.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.config "context" $ ) | nindent 4 }} + rules: + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$4 + labels: + broker_id: "$3" + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Count) + name: kafka_controller_$1_$2_$3 + - pattern : kafka.network<>(Value) + name: kafka_network_$1_$2_$4 + labels: + network_processor: $3 + - pattern : kafka.network<>(Count|Value) + name: kafka_network_$1_$2_$4 + labels: + request: $3 + - pattern : kafka.network<>(Count|Value) + name: kafka_network_$1_$2_$3 + - pattern : kafka.network<>(Count|Value) + name: kafka_network_$1_$2_$3 + - pattern: kafka.server<>(Count|OneMinuteRate) + name: kafka_server_$1_$2_$4 + labels: + topic: $3 + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$4 + labels: + client_id: "$3" + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$3_$4 + - pattern: kafka.server<>(Count|Value|OneMinuteRate) + name: kafka_server_$1_total_$2_$3 + - pattern: kafka.server<>(queue-size) + name: kafka_server_$1_$2 + - pattern: java.lang<(.+)>(\w+) + name: java_lang_$1_$4_$3_$2 + - pattern: java.lang<>(\w+) + name: java_lang_$1_$3_$2 + - pattern : java.lang + - pattern: kafka.log<>Value + name: kafka_log_$1_$2 + labels: + topic: $3 + partition: $4 + {{- if .Values.metrics.jmx.extraRules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.extraRules "context" $ ) | nindent 6 }} + {{- end }} 
+{{- end -}} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-servicemonitor.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-servicemonitor.yaml new file mode 100644 index 00000000..d5d3866b --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: {{ .Values.metrics.serviceMonitor.path }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + 
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-svc.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-svc.yaml new file mode 100644 index 00000000..ffa4a1b3 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/jmx-svc.yaml @@ -0,0 +1,38 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.jmx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.jmx.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }} + {{- if .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.ports.metrics }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- with .Values.metrics.jmx.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ . | quote }} + {{- end }} + {{- with .Values.metrics.jmx.service.ipFamilies }} + ipFamilies: + {{- . | toYaml | nindent 2 }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/prometheusrule.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/prometheusrule.yaml new file mode 100644 index 00000000..5e6df743 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/metrics/prometheusrule.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.jmx.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.groups }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.namespace }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.prometheusRule.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" .) | nindent 4 }} + {{- end }} +spec: + groups: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.groups "context" .) | nindent 4 }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/job.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/job.yaml new file mode 100644 index 00000000..ca2bc9e9 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/job.yaml @@ -0,0 +1,348 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.provisioning.enabled }} +kind: Job +apiVersion: batch/v1 +metadata: + name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka-provisioning + annotations: + {{- if .Values.provisioning.useHelmHooks }} + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + template: + metadata: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.provisioning.podLabels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.provisioning.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kafka.provisioning.serviceAccountName" . }} + automountServiceAccountToken: {{ .Values.provisioning.automountServiceAccountToken }} + enableServiceLinks: {{ .Values.provisioning.enableServiceLinks }} + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.provisioning.schedulerName }} + schedulerName: {{ .Values.provisioning.schedulerName | quote }} + {{- end }} + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.provisioning.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + {{- if .Values.provisioning.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.provisioning.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.tolerations "context" .) | nindent 8 }} + {{- end }} + initContainers: + - name: prepare-config + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.provisioning.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + . /opt/drycc/scripts/libkafka.sh + + if [[ ! 
-f "$KAFKA_CONF_FILE" ]]; then + touch $KAFKA_CONF_FILE + + kafka_server_conf_set security.protocol {{ .Values.listeners.client.protocol | quote }} + {{- if regexFind "SSL" (upper .Values.listeners.client.protocol) }} + kafka_server_conf_set ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + kafka_server_conf_set ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + ! is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_server_conf_set ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD" + {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }} + {{- if .Values.provisioning.auth.tls.caCert }} + file_to_multiline_property() { + awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}" + } + # Convert the PEM key to PKCS#8 Format for Java compatibility + openssl pkcs8 -topk8 -nocrypt -passin pass:"${KAFKA_TLS_PEM_KEY_PASSWORD:-}" -in "/certs/{{ .Values.provisioning.auth.tls.key }}" -out "/shared/tls.key" + kafka_server_conf_set ssl.keystore.key "$(file_to_multiline_property "/shared/tls.key")" + kafka_server_conf_set ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")" + kafka_server_conf_set ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")" + {{- else }} + kafka_server_conf_set ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}" + kafka_server_conf_set ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}" + {{- end }} + {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }} + kafka_server_conf_set ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}" + kafka_server_conf_set ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}" + ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_server_conf_set ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD" + ! 
is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_server_conf_set ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" + {{- end }} + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} + {{- if regexFind "PLAIN" ( upper .Values.sasl.enabledMechanisms) }} + kafka_server_conf_set sasl.mechanism PLAIN + kafka_server_conf_set sasl.jaas.config "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if regexFind "SCRAM-SHA-256" ( upper .Values.sasl.enabledMechanisms) }} + kafka_server_conf_set sasl.mechanism SCRAM-SHA-256 + kafka_server_conf_set sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if regexFind "SCRAM-SHA-512" ( upper .Values.sasl.enabledMechanisms) }} + kafka_server_conf_set sasl.mechanism SCRAM-SHA-512 + kafka_server_conf_set sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if regexFind "OAUTHBEARER" ( upper .Values.sasl.enabledMechanisms) }} + kafka_server_conf_set sasl.mechanism OAUTHBEARER + kafka_server_conf_set sasl.jaas.config "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required clientId=\"$SASL_CLIENT_ID\" password=\"$SASL_CLIENT_SECRET\";" + kafka_server_conf_set sasl.login.callback.handler.class "org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler" + kafka_server_conf_set sasl.oauthbearer.token.endpoint.url {{ .Values.sasl.oauthbearer.tokenEndpointUrl | quote }} + {{- end }} + {{- end }} + fi + env: + - name: KAFKA_CONF_FILE + value: /shared/client.properties + {{- if and (regexFind "SSL" (upper .Values.listeners.client.protocol)) .Values.provisioning.auth.tls.passwordsSecret }} + - name: KAFKA_CLIENT_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: 
{{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }} + - name: KAFKA_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }} + - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }} + {{- end }} + {{- if and (not .Values.tls.autoGenerated.enabled) (or .Values.tls.keyPassword (and .Values.tls.passwordsSecret .Values.tls.passwordsSecretPemPasswordKey)) }} + - name: KAFKA_TLS_PEM_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.tlsPasswordsSecretName" . }} + key: {{ default "key-password" .Values.tls.passwordsSecretPemPasswordKey | quote }} + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} + {{- if include "kafka.saslUserPasswordsEnabled" . }} + - name: SASL_USERNAME + value: {{ index .Values.sasl.client.users 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: system-user-password + {{- end }} + {{- if include "kafka.saslClientSecretsEnabled" . }} + - name: SASL_CLIENT_ID + value: {{ .Values.sasl.interbroker.clientId | quote }} + - name: SASL_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . 
}} + key: inter-broker-client-secret + {{- end }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- else if ne .Values.provisioning.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.provisioning.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.provisioning.waitForKafka }} + - name: wait-for-available-kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.provisioning.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + . /opt/drycc/scripts/libos.sh + + exit_code=0 + if ! retry_while "/opt/drycc/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server ${KAFKA_SERVICE} --command-config /shared/client.properties"; then + echo "Kafka is not ready" + exit_code=1 + else + echo "Kafka ready" + fi + + exit "$exit_code" + env: + - name: KAFKA_SERVICE + value: {{ printf "%s:%d" (include "common.names.fullname" .) 
(.Values.service.ports.client | int64) }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- else if ne .Values.provisioning.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.provisioning.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: kafka-provisioning + image: {{ include "kafka.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.provisioning.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ec + - | + {{- if .Values.provisioning.preScript }} + echo "Running pre-provisioning script" + {{ .Values.provisioning.preScript | nindent 14 }} + {{- end }} + + kafka_provisioning_commands=( + {{- range $topic := .Values.provisioning.topics }} + "/opt/drycc/kafka/bin/kafka-topics.sh \ + --create \ + --if-not-exists \ + --bootstrap-server ${KAFKA_SERVICE} \ + --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \ + --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \ + {{- range $name, $value := $topic.config }} + --config {{ $name }}={{ $value }} \ + {{- end }} + --command-config /shared/client.properties \ + --topic {{ $topic.name }}" + {{- end }} + {{- range $command := .Values.provisioning.extraProvisioningCommands }} + {{- $command | quote | nindent 16 }} + {{- end }} + ) + + echo "Starting provisioning" + for ((index=0; index < 
${#kafka_provisioning_commands[@]}; index+={{ .Values.provisioning.parallel }})); do + for j in $(seq ${index} $((${index}+{{ .Values.provisioning.parallel }}-1))); do + ${kafka_provisioning_commands[j]} & + done + # Wait the end of the jobs + wait + done + + {{- if .Values.provisioning.postScript }} + echo "Running post-provisioning script" + {{ .Values.provisioning.postScript | nindent 14 }} + {{- end }} + + echo "Provisioning succeeded" + {{- end }} + env: + - name: DRYCC_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: KAFKA_SERVICE + value: {{ printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }} + {{- if .Values.provisioning.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.provisioning.extraEnvVarsCM .Values.provisioning.extraEnvVarsSecret }} + envFrom: + {{- if .Values.provisioning.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.provisioning.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- else if ne .Values.provisioning.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.provisioning.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.log4j2 .Values.existingLog4j2ConfigMap }} + - name: log4j2-config + mountPath: /opt/drycc/kafka/config/log4j2.yaml + subPath: log4j2.yaml + {{- end }} + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + {{- if not (empty 
.Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + - name: shared + mountPath: /shared + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.log4j2 .Values.existingLog4j2ConfigMap }} + - name: log4j2-config + configMap: + name: {{ include "kafka.log4j2.configMapName" . }} + {{- end }} + {{- if (regexFind "SSL" (upper .Values.listeners.client.protocol)) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + secret: + secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + - name: shared + emptyDir: {} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/serviceaccount.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/serviceaccount.yaml new file mode 100644 index 00000000..b8f23f58 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.provisioning.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.provisioning.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.provisioning.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/tls-secret.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/tls-secret.yaml new file mode 100644 index 00000000..43f29ab9 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/provisioning/tls-secret.yaml @@ -0,0 +1,21 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.provisioning.enabled (regexFind "SSL" (upper .Values.listeners.client.protocol)) (not .Values.provisioning.auth.tls.passwordsSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kafka.client.passwordsSecretName" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + truststore-password: {{ default "" .Values.provisioning.auth.tls.truststorePassword | b64enc | quote }} + keystore-password: {{ default "" .Values.provisioning.auth.tls.keystorePassword | b64enc | quote }} + key-password: {{ default "" .Values.provisioning.auth.tls.keyPassword | b64enc | quote }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/role.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/role.yaml new file mode 100644 index 00000000..7c0db0ae --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/role.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/rolebinding.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/rolebinding.yaml new file mode 100644 index 00000000..cd4d7ba1 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Drycc Community. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/serviceaccount.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/serviceaccount.yaml new file mode 100644 index 00000000..fd524074 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/rbac/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/secrets.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/secrets.yaml new file mode 100644 index 00000000..6b0ab866 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/secrets.yaml @@ -0,0 +1,132 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "kafka.createSaslSecret" .) }} +{{- $secretName := printf "%s-user-passwords" (include "common.names.fullname" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (include "kafka.client.saslEnabled" .) }} + {{- $secretValue := "" }} + {{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .) 
$secretName).data }} + {{- if and $secretData (hasKey $secretData "client-passwords")}} + {{- $secretValue = index $secretData "client-passwords" }} + {{- end }} + {{- if not (empty .Values.sasl.client.passwords) }} + {{- $secretValue = join "," .Values.sasl.client.passwords | toString | b64enc }} + {{- else if or (empty $secretValue) (not (eq (len .Values.sasl.client.users) (len (splitList "," (b64dec $secretValue))))) }} + {{- $clientPasswords := list }} + {{- range .Values.sasl.client.users }} + {{- $clientPasswords = append $clientPasswords (randAlphaNum 10) }} + {{- end }} + {{- $secretValue = join "," $clientPasswords | toString | b64enc }} + {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + client-passwords: {{ $secretValue | quote }} + system-user-password: {{ index (splitList "," (b64dec $secretValue)) 0 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + inter-broker-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "inter-broker-password" "providedValues" (list "sasl.interbroker.password") "failOnNew" false "context" $) }} + {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) }} + inter-broker-client-secret: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "inter-broker-client-secret" "providedValues" (list "sasl.interbroker.clientSecret") "failOnNew" false "context" $) }} + {{- end }} + {{- end }} + {{- if regexFind "SASL" (upper .Values.listeners.controller.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + controller-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "controller-password" "providedValues" (list "sasl.controller.password") "failOnNew" false "context" $) }} + {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) 
}}
+  controller-client-secret: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "controller-client-secret" "providedValues" (list "sasl.controller.clientSecret") "failOnNew" false "context" $) }}
+  {{- end }}
+  {{- end }}
+{{- end }}
+{{- if not .Values.existingKraftSecret }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-kraft" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  cluster-id: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-kraft" (include "common.names.fullname" .)) "key" "cluster-id" "providedValues" (list "clusterId") "length" 22 "context" $) }}
+  {{- range $i := until (int .Values.controller.replicaCount) }}
+  {{- $key := printf "controller-%d-id" $i }}
+  {{ $key }}: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-kraft" (include "common.names.fullname" $)) "key" $key "providedValues" (list "") "length" 22 "failOnNew" false "context" $) }}
+  {{- end }}
+{{- end }}
+{{- if .Values.serviceBindings.enabled }}
+{{- $host := list }}
+{{- $port := .Values.service.ports.client }}
+{{- $bootstrapServers := list }}
+{{- if not .Values.controller.controllerOnly }}
+  {{- range $i, $e := until (int .Values.controller.replicaCount) }}
+  {{- $controller := printf "%s-controller-%s.%s-headless.%s.svc.%s" (include "common.names.fullname" $) (print $i) (include "common.names.fullname" $) $.Release.Namespace $.Values.clusterDomain }}
+  {{- $host = append $host $controller }}
+  {{- $bootstrapServers = append $bootstrapServers (printf "%s:%v" $controller $.Values.service.ports.client) }}
+  {{- end }}
+{{- end }}
+{{- range $i, $e := until (int .Values.broker.replicaCount) }}
+  {{- $broker := printf "%s-broker-%s.%s-headless.%s.svc.%s" (include "common.names.fullname" $) (print $i) (include "common.names.fullname" $) $.Release.Namespace $.Values.clusterDomain }}
+  {{- $host = append $host $broker }}
+  {{- $bootstrapServers = append $bootstrapServers (printf "%s:%v" $broker $.Values.service.ports.client) }}
+{{- end }}
+{{- if (include "kafka.client.saslEnabled" .) }}
+{{- range $i, $e := until (len .Values.sasl.client.users) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" $ }}-svcbind-user-{{ $i }}
+  namespace: {{ $.Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- if $.Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: servicebinding.io/kafka
+data:
+  provider: {{ print "drycc" | b64enc | quote }}
+  type: {{ print "kafka" | b64enc | quote }}
+  username: {{ index $.Values.sasl.client.users $i | b64enc | quote }}
+  password: {{ index $.Values.sasl.client.passwords $i | b64enc | quote }}
+  host: {{ join "," $host | b64enc | quote }}
+  port: {{ print $port | b64enc | quote }}
+  bootstrap-servers: {{ join "," $bootstrapServers | b64enc | quote }}
+{{- end }}
+{{- else }}
+--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-svcbind + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: servicebinding.io/kafka +data: + provider: {{ print "drycc" | b64enc | quote }} + type: {{ print "kafka" | b64enc | quote }} + host: {{ join "," $host | b64enc | quote }} + port: {{ print $port | b64enc | quote }} + bootstrap-servers: {{ join "," $bootstrapServers | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/svc.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/svc.yaml new file mode 100644 index 00000000..e89be9c5 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/svc.yaml @@ -0,0 +1,76 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if (eq .Values.service.type "LoadBalancer") }} + allocateLoadBalancerNodePorts: {{ .Values.service.allocateLoadBalancerNodePorts }} + {{- if (not (empty .Values.service.loadBalancerClass)) }} + loadBalancerClass: {{ .Values.service.loadBalancerClass }} + {{- end }} + {{- if (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.externalAccess.enabled }} + - name: tcp-external + port: {{ .Values.service.ports.external }} + protocol: TCP + targetPort: external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ 
.Values.service.nodePorts.external }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.controller.controllerOnly }} + app.kubernetes.io/component: broker + {{- end }} + {{- with .Values.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ . | quote }} + {{- end }} + {{- with .Values.service.ipFamilies }} + ipFamilies: + {{- . | toYaml | nindent 2 }} + {{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/templates/tls-secret.yaml b/addons/kafka/4.2/chart/kafka-4.2/templates/tls-secret.yaml new file mode 100644 index 00000000..b4b6ad44 --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/templates/tls-secret.yaml @@ -0,0 +1,65 @@ +{{- /* +Copyright Drycc Community. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if include "kafka.createTlsSecret" . }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $secretName := include "kafka.tlsSecretName" . }} +{{- $altNames := list (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s" $fullname $releaseNamespace) $fullname "127.0.0.1" "localhost" }} +{{- $controllerSvcName := printf "%s-headless" (include "kafka.controller.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- $brokerSvcName := printf "%s-headless" (include "kafka.broker.fullname" .) 
| trunc 63 | trimSuffix "-" }} +{{- $altNames = concat $altNames (list (printf "*.%s.%s.svc.%s" $controllerSvcName $releaseNamespace $clusterDomain) (printf "*.%s.%s" $controllerSvcName $releaseNamespace) (printf "*.%s" $controllerSvcName)) }} +{{- $altNames = concat $altNames (list (printf "*.%s.%s.svc.%s" $brokerSvcName $releaseNamespace $clusterDomain) (printf "*.%s.%s" $brokerSvcName $releaseNamespace) (printf "*.%s" $brokerSvcName)) }} +{{- if .Values.externalAccess.enabled -}} + {{- with .Values.externalAccess.broker.service.domain }} + {{- $altNames = append $altNames . }} + {{- end }} + {{- with .Values.externalAccess.controller.service.domain }} + {{- $altNames = append $altNames . }} + {{- end }} +{{- end }} +{{- with .Values.tls.autoGenerated.customAltNames }} + {{- $altNames = concat $altNames . }} +{{- end }} +{{- $ca := genCA "kafka-ca" 365 }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} +--- +{{- end }} +{{- if include "kafka.createTlsPasswordsSecret" . }} +{{- $secretName := include "kafka.tlsPasswordsSecretName" . 
}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: kafka + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{ .Values.tls.passwordsSecretKeystoreKey }}: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" .Values.tls.passwordsSecretKeystoreKey "providedValues" (list "tls.keystorePassword") "context" $) }} + {{ .Values.tls.passwordsSecretTruststoreKey }}: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" .Values.tls.passwordsSecretTruststoreKey "providedValues" (list "tls.truststorePassword") "context" $) }} + {{- if .Values.tls.keyPassword }} + {{ default "key-password" .Values.tls.passwordsSecretPemPasswordKey }}: {{ .Values.tls.keyPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/chart/kafka-4.2/values.yaml b/addons/kafka/4.2/chart/kafka-4.2/values.yaml new file mode 100644 index 00000000..dc3c9c0f --- /dev/null +++ b/addons/kafka/4.2/chart/kafka-4.2/values.yaml @@ -0,0 +1,2493 @@ +# Copyright Drycc Community. 
+# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param apiVersions Override Kubernetes API versions reported by .Capabilities +## +apiVersions: [] +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param usePasswordFiles Mount credentials as files instead of using environment variables +## +usePasswordFiles: true +## Diagnostic mode +## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) +## @param diagnosticMode.command Command to override all containers in the chart release +## @param diagnosticMode.args Args to override all containers in the chart release +## +diagnosticMode: + enabled: false + command: + - sleep + args: + - infinity +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## @section Kafka common parameters + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## @param image.registry [default: REGISTRY_NAME] Kafka image registry +## @param image.repository [default: 
REPOSITORY_NAME/kafka] Kafka image repository +## @skip image.tag Kafka image tag (immutable tags are recommended) +## @param image.digest Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Kafka image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: registry.drycc.cc + repository: drycc-addons/kafka + tag: "4.2" + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## @param clusterId Kafka Kraft cluster ID (ignored if existingKraftSecret is set). A random cluster ID will be generated the 1st time Kraft is initialized if not set. +## NOTE: Already initialized Kafka nodes will use cluster ID stored in their persisted storage. +## If reusing existing PVCs, make sure the cluster ID is set matching the stored cluster ID, otherwise new nodes will fail to join the cluster. +## In case the cluster ID stored in the secret does not match the value stored in /bitnami/kafka/data/meta.properties, remove the secret and upgrade the chart setting the correct value. +## +clusterId: "" +## @param existingKraftSecret Name of the secret containing the Kafka KRaft Cluster ID and one directory ID per controller replica +## +existingKraftSecret: "" +## @param kraftVersion Kraft version to be used. It determines whether static quorum (kraftVersion=0) or dynamic quorum (kraftVersion=1) will be used. 
+## NOTE: Kafka 4.0 does not yet support switching kraft version. This setting was added for backward-compatibility with 3.x clusters. +## Ref: https://kafka.apache.org/documentation/#static_versus_dynamic_kraft_quorums +## +kraftVersion: 1 +## @param config Specify content for Kafka configuration (auto-generated based on other parameters otherwise) +## NOTE: This will override the configuration based on values, please act carefully +## Use simple key-value YAML format, then it's transformed to properties format by the chart. e.g: +## process.roles: broker +## ... will be transformed to: +## process.roles=broker +## +config: {} +## @param overrideConfiguration Kafka common configuration override. Values defined here takes precedence over the ones defined at `config` +## +overrideConfiguration: {} +## @param existingConfigmap Name of an existing ConfigMap with the Kafka configuration +## +existingConfigmap: "" +## @param secretConfig Additional configuration to be appended at the end of the generated Kafka configuration (store in a secret) +## +secretConfig: "" +## @param existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka configuration +## The key for the configuration should be: server-secret.properties +## NOTE: This will override secretConfig value +## +existingSecretConfig: "" +## @param log4j2 Specify content for Kafka log4j2 configuration (default one is used otherwise) +## ref: https://github.com/apache/kafka/blob/trunk/config/log4j2.yaml +## +log4j2: "" +## @param existingLog4j2ConfigMap The name of an existing ConfigMap containing the log4j2.yaml file +## +existingLog4j2ConfigMap: "" +## @param heapOpts Kafka Java Heap configuration +## +heapOpts: -XX:InitialRAMPercentage=75 -XX:MaxRAMPercentage=75 +## @param brokerRackAwareness.enabled Enable Kafka Rack Awareness +## @param brokerRackAwareness.cloudProvider Cloud provider to use to set Broker Rack Awareness. 
Allowed values: `aws-az`, `azure` +## @param brokerRackAwareness.azureApiVersion Metadata API version to use when brokerRackAwareness.cloudProvider is set to `azure` +## ref: https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica +## +brokerRackAwareness: + enabled: false + cloudProvider: "" + azureApiVersion: "2023-11-15" +## @param interBrokerProtocolVersion Override the setting 'inter.broker.protocol.version' during the ZK migration. +## Ref. https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html +## +interBrokerProtocolVersion: "" +## Kafka listeners configuration +## +listeners: + ## @param listeners.client.name Name for the Kafka client listener + ## @param listeners.client.containerPort Port for the Kafka client listener + ## @param listeners.client.protocol Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' + ## @param listeners.client.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' + client: + containerPort: 9092 + protocol: SASL_PLAINTEXT + name: CLIENT + sslClientAuth: "" + ## @param listeners.controller.name Name for the Kafka controller listener + ## @param listeners.controller.containerPort Port for the Kafka controller listener + ## @param listeners.controller.protocol Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' + ## @param listeners.controller.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. 
Allowed values are 'none', 'requested' and 'required' + ## Ref: https://cwiki.apache.org/confluence/display/KAFKA/KIP-684+-+Support+mutual+TLS+authentication+on+SASL_SSL+listeners + controller: + name: CONTROLLER + containerPort: 9093 + protocol: SASL_PLAINTEXT + sslClientAuth: "" + ## @param listeners.interbroker.name Name for the Kafka inter-broker listener + ## @param listeners.interbroker.containerPort Port for the Kafka inter-broker listener + ## @param listeners.interbroker.protocol Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' + ## @param listeners.interbroker.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' + interbroker: + containerPort: 9094 + protocol: SASL_PLAINTEXT + name: INTERNAL + sslClientAuth: "" + ## @param listeners.external.containerPort Port for the Kafka external listener + ## @param listeners.external.protocol Security protocol for the Kafka external listener. . Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' + ## @param listeners.external.name Name for the Kafka external listener + ## @param listeners.external.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required' + external: + containerPort: 9095 + protocol: SASL_PLAINTEXT + name: EXTERNAL + sslClientAuth: "" + ## @param listeners.extraListeners Array of listener objects to be appended to already existing listeners + ## E.g. 
+ ## extraListeners: + ## - name: CUSTOM + ## containerPort: 9097 + ## protocol: SASL_PLAINTEXT + ## sslClientAuth: "" + ## + extraListeners: [] + ## NOTE: If set, below values will override configuration set using the above values (extraListeners.*, controller.*, interbroker.*, client.* and external.*) + ## @param listeners.overrideListeners Overrides the Kafka 'listeners' configuration setting. + ## @param listeners.advertisedListeners Overrides the Kafka 'advertised.listener' configuration setting. + ## @param listeners.securityProtocolMap Overrides the Kafka 'security.protocol.map' configuration setting. + overrideListeners: "" + advertisedListeners: "" + securityProtocolMap: "" +## @section Kafka SASL parameters +## Kafka SASL settings for authentication, required if SASL_PLAINTEXT or SASL_SSL listeners are configured +## +sasl: + ## @param sasl.enabledMechanisms Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER` + ## NOTE: At the moment, Kafka Raft mode does not support SCRAM, that is why only PLAIN is configured. + ## + enabledMechanisms: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512 + ## @param sasl.interBrokerMechanism SASL mechanism for inter broker communication. + ## + interBrokerMechanism: PLAIN + ## @param sasl.controllerMechanism SASL mechanism for controller communications. + ## + controllerMechanism: PLAIN + ## Settings for OAuthBearer mechanism + ## @param sasl.oauthbearer.tokenEndpointUrl The URL for the OAuth/OIDC identity provider + ## @param sasl.oauthbearer.jwksEndpointUrl The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved + ## @param sasl.oauthbearer.expectedAudience The comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences + ## @param sasl.oauthbearer.subClaimName The OAuth claim name for the subject. 
+ ## + oauthbearer: + tokenEndpointUrl: "" + jwksEndpointUrl: "" + expectedAudience: "" + subClaimName: "sub" + ## Credentials for inter-broker communications. + ## @param sasl.interbroker.user Username for inter-broker communications when SASL is enabled + ## @param sasl.interbroker.password Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. + ## @param sasl.interbroker.clientId Client ID for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER + ## @param sasl.interbroker.clientSecret Client Secret for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the controller listener, a random secret will be generated. + ## + interbroker: + user: inter_broker_user + password: "" + clientId: inter_broker_client + clientSecret: "" + ## Credentials for controller communications. + ## @param sasl.controller.user Username for controller communications when SASL is enabled + ## @param sasl.controller.password Password for controller communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. + ## @param sasl.controller.clientId Client ID for controller communications when SASL is enabled with mechanism OAUTHBEARER + ## @param sasl.controller.clientSecret Client Secret for controller communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the inter-broker listener, a random secret will be generated. + ## + controller: + user: controller_user + password: "" + clientId: controller_broker_client + clientSecret: "" + ## Credentials for client communications. 
+ ## @param sasl.client.users Comma-separated list of usernames for client communications when SASL is enabled + ## @param sasl.client.passwords Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users + ## + client: + users: + - drycc + passwords: "" + ## @param sasl.existingSecret Name of the existing secret containing credentials for client.users, interbroker.user and controller.user + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=inter-broker-client-secret=INTER_BROKER_CLIENT_SECRET --from-literal=controller-password=CONTROLLER_PASSWORD --from-literal=controller-client-secret=CONTROLLER_CLIENT_SECRET + ## The client secrets are only required when using OAuthBearer as SASL mechanism. + ## Client, inter-broker and controller passwords are only required if the SASL mechanism includes something other than OAuthBearer. + ## + existingSecret: "" +## @section Kafka TLS parameters +## Kafka TLS settings, required if SSL or SASL_SSL listeners are configured +## +tls: + ## @param tls.type Format to use for TLS certificates. Allowed types: `JKS` and `PEM` + ## + type: JKS + ## @param tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. + ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. 
+ ## + pemChainIncluded: false + ## @param tls.autoGenerated.enabled Enable automatic generation of TLS certificates (only supported if `tls.type` is `PEM`) + ## @param tls.autoGenerated.engine Mechanism to generate the certificates (allowed values: helm, cert-manager) + ## @param tls.autoGenerated.customAltNames List of additional subject alternative names (SANs) for the automatically generated TLS certificates. + ## @param tls.autoGenerated.certManager.existingIssuer The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine) + ## @param tls.autoGenerated.certManager.existingIssuerKind Existing Issuer kind, defaults to Issuer (only for `cert-manager` engine) + ## @param tls.autoGenerated.certManager.keyAlgorithm Key algorithm for the certificates (only for `cert-manager` engine) + ## @param tls.autoGenerated.certManager.keySize Key size for the certificates (only for `cert-manager` engine) + ## @param tls.autoGenerated.certManager.duration Duration for the certificates (only for `cert-manager` engine) + ## @param tls.autoGenerated.certManager.renewBefore Renewal period for the certificates (only for `cert-manager` engine) + ## + autoGenerated: + enabled: true + engine: helm + customAltNames: [] + certManager: + existingIssuer: "" + existingIssuerKind: "" + keySize: 2048 + keyAlgorithm: RSA + duration: 2160h + renewBefore: 360h + ## @param tls.existingSecret Name of the existing secret containing the TLS certificates for the Kafka nodes. + ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore. + ## Create these secrets following the steps below: + ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka--X.keystore.jks` where X is the replica number of the . 
+ ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks \ + ## --from-file=kafka-controller-0.keystore.jks=./kafka-controller-0.keystore.jks --from-file=kafka-broker-0.keystore.jks=./kafka-broker-0.keystore.jks ... + ## + ## NOTE: Alternatively, a single keystore can be provided for all nodes under the key 'kafka.keystore.jks', this keystore will be used by all nodes unless overridden by the 'kafka-<role>-X.keystore.jks' file + ## + ## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key. + ## Create these secrets following the steps below: + ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA + ## 2) Rename your CA file to `ca.crt`. + ## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker. + ## 4) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker. + ## 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=ca.crt=./ca.crt --from-file=kafka-controller-0.crt=./kafka-controller-0.crt --from-file=kafka-controller-0.key=./kafka-controller-0.key \ + ## --from-file=kafka-broker-0.crt=./kafka-broker-0.crt --from-file=kafka-broker-0.key=./kafka-broker-0.key ... + ## + ## NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'tls.crt' and 'tls.key'.
These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files + ## + existingSecret: "" + ## @param tls.passwordsSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) + ## + passwordsSecret: "" + ## @param tls.passwordsSecretKeystoreKey The secret key from the tls.passwordsSecret containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: keystore-password + ## @param tls.passwordsSecretTruststoreKey The secret key from the tls.passwordsSecret containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: truststore-password + ## @param tls.passwordsSecretPemPasswordKey The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'. + ## + passwordsSecretPemPasswordKey: "" + ## @param tls.keystorePassword Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. + ## When using tls.type=PEM, the generated keystore will use this password or randomly generate one. + ## + keystorePassword: "" + ## @param tls.truststorePassword Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. + ## When using tls.type=PEM, the generated keystore will use this password or randomly generate one. + ## + truststorePassword: "" + ## @param tls.keyPassword Password to access the PEM key when it is password-protected. + ## Note: ignored when using 'tls.passwordsSecret' + ## + keyPassword: "" + ## @param tls.jksKeystoreKey The secret key from the `tls.existingSecret` containing the keystore + ## Note: ignored when using 'pem' format for certificates.
+ ## + jksKeystoreKey: "" + ## @param tls.jksTruststoreSecret Name of the existing secret containing your truststore if truststore not existing or different from the one in the `tls.existingSecret` + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststoreSecret: "" + ## @param tls.jksTruststoreKey The secret key from the `tls.existingSecret` or `tls.jksTruststoreSecret` containing the truststore + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststoreKey: "" + ## @param tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate + ## Disable server host name verification by setting it to an empty string. + ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + endpointIdentificationAlgorithm: https + ## @param tls.sslClientAuth Sets the default value for the ssl.client.auth Kafka setting. + ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + sslClientAuth: "required" +## @param extraEnvVars Extra environment variables to add to Kafka pods +## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration +## e.g: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables +## +extraEnvVarsSecret: "" +## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s) +## e.g: +## extraVolumes: +## - name: kafka-jaas +## secret: +## secretName: kafka-jaas +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s) +## extraVolumeMounts: +## - name: kafka-jaas +## mountPath: /bitnami/kafka/config/kafka_jaas.conf +## subPath: kafka_jaas.conf +## 
+extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the Kafka pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the Kafka pod(s) +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## DNS-Pod services +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## @param dnsPolicy Specifies the DNS policy for the Kafka pods +## DNS policies can be set on a per-Pod basis. Currently Kubernetes supports the following Pod-specific DNS policies. +## Available options: Default, ClusterFirst, ClusterFirstWithHostNet, None +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: "" +## @param dnsConfig allows users more control on the DNS settings for a Pod. Required if `dnsPolicy` is set to `None` +## The dnsConfig field is optional and it can work with any dnsPolicy settings. +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config +## E.g.
+## dnsConfig: +## nameservers: +## - 192.0.2.1 # this is an example +## searches: +## - ns1.svc.cluster-domain.example +## - my.dns.search.suffix +## options: +## - name: ndots +## value: "2" +## - name: edns0 +dnsConfig: {} + +## Default init Containers +## +defaultInitContainers: + ## 'volume-permissions' init container + ## Used to change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node + ## + volumePermissions: + ## @param defaultInitContainers.volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param defaultInitContainers.volumePermissions.image.registry [default: REGISTRY_NAME] "volume-permissions" init-containers' image registry + ## @param defaultInitContainers.volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] "volume-permissions" init-containers' image repository + ## @skip defaultInitContainers.volumePermissions.image.tag "volume-permissions" init-containers' image tag (immutable tags are recommended) + ## @param defaultInitContainers.volumePermissions.image.digest "volume-permissions" init-containers' image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param defaultInitContainers.volumePermissions.image.pullPolicy "volume-permissions" init-containers' image pull policy + ## @param defaultInitContainers.volumePermissions.image.pullSecrets "volume-permissions" init-containers' image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc/base + tag: trixie + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Configure "volume-permissions" init-container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.enabled Enabled "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "volume-permissions" init-containers + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser Set runAsUser in "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.privileged Set privileged in "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.add List of capabilities to be added in "volume-permissions" init-containers + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "volume-permissions" init-containers + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.seccompProfile.type Set seccomp profile in "volume-permissions" init-containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 0 + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Kafka "volume-permissions" init container resource requests and limits + 
## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.volumePermissions.resourcesPreset Set Kafka "volume-permissions" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.volumePermissions.resources is set (defaultInitContainers.volumePermissions.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defaultInitContainers.volumePermissions.resources Set Kafka "volume-permissions" init container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Kafka "prepare-config" init container + ## Used to prepare the Kafka configuration files for main containers to use them + ## + prepareConfig: + ## Configure "prepare-config" init-container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.enabled Enabled "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "prepare-config" init-containers + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsUser Set runAsUser in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsGroup Set runAsGroup in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.runAsNonRoot Set
runAsNonRoot in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.privileged Set privileged in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "prepare-config" init-containers' Security Context + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.add List of capabilities to be added in "prepare-config" init-containers + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "prepare-config" init-containers + ## @param defaultInitContainers.prepareConfig.containerSecurityContext.seccompProfile.type Set seccomp profile in "prepare-config" init-containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Kafka "prepare-config" init container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.prepareConfig.resourcesPreset Set Kafka "prepare-config" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.prepareConfig.resources is set (defaultInitContainers.prepareConfig.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defaultInitContainers.prepareConfig.resources Set Kafka "prepare-config" init container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param defaultInitContainers.prepareConfig.extraInit Additional content for the "prepare-config" init script, rendered as a template. + ## + extraInit: "" + ## 'auto-discovery' init container + ## Used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## @param defaultInitContainers.autoDiscovery.enabled Enable init container that auto-detects external IPs/ports by querying the K8s API + ## + enabled: true + ## Bitnami Kubectl image + ## @param defaultInitContainers.autoDiscovery.image.registry [default: REGISTRY_NAME] "auto-discovery" init-containers' image registry + ## @param defaultInitContainers.autoDiscovery.image.repository [default: REPOSITORY_NAME/os-shell] "auto-discovery" init-containers' image repository + ## @skip defaultInitContainers.autoDiscovery.image.tag "auto-discovery" init-containers' image tag (immutable tags are recommended) + ## @param defaultInitContainers.autoDiscovery.image.digest "auto-discovery" init-containers' image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param defaultInitContainers.autoDiscovery.image.pullPolicy "auto-discovery" init-containers' image pull policy + ## @param defaultInitContainers.autoDiscovery.image.pullSecrets "auto-discovery" init-containers' image pull secrets + ## + image: + registry: registry.drycc.cc + repository: drycc-addons/kubectl + tag: "1.27" + digest: "" + ## Specify an imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Configure "auto-discovery" init-container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.enabled Enabled "auto-discovery" init-containers' Security Context + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "auto-discovery" init-containers + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.runAsUser Set runAsUser in "auto-discovery" init-containers' Security Context + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.runAsGroup Set runAsGroup in "auto-discovery" init-containers' Security Context + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.runAsNonRoot Set runAsNonRoot in "auto-discovery" init-containers' Security Context + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem Set readOnlyRootFilesystem in "auto-discovery" init-containers' Security Context + ## @param
defaultInitContainers.autoDiscovery.containerSecurityContext.privileged Set privileged in "auto-discovery" init-containers' Security Context + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "auto-discovery" init-containers' Security Context + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.capabilities.add List of capabilities to be added in "auto-discovery" init-containers + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "auto-discovery" init-containers + ## @param defaultInitContainers.autoDiscovery.containerSecurityContext.seccompProfile.type Set seccomp profile in "auto-discovery" init-containers + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + add: [] + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Kafka "auto-discovery" init container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.autoDiscovery.resourcesPreset Set Kafka "auto-discovery" init container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.autoDiscovery.resources is set (defaultInitContainers.autoDiscovery.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param defaultInitContainers.autoDiscovery.resources Set Kafka "auto-discovery" init container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + +## @section Controller-eligible statefulset parameters +## +controller: + ## @param controller.replicaCount Number of Kafka controller-eligible nodes + ## + replicaCount: 3 + ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. + ## + controllerOnly: false + ## @param controller.quorumBootstrapServers Override the Kafka controller quorum bootstrap servers of the Kafka Kraft cluster. If not set, it will be automatically configured to use all controller-eligible nodes. + ## + quorumBootstrapServers: "" + ## @param controller.minId Minimal node.id values for controller-eligible nodes. Do not change after first initialization. + ## Broker-only nodes increment their ID starting at this minimal value. + ## We recommend setting this value high enough, as IDs under this value will be used by controller-eligible nodes + ## + minId: 0 + ## @param controller.config Specify content for Kafka configuration for Kafka controller-eligible nodes (auto-generated based on other parameters otherwise) + ## NOTE: This will override the configuration based on values, please act carefully + ## Use simple key-value YAML format, then it's transformed to properties format by the chart. e.g: + ## process.roles: controller + ## ... will be transformed to: + ## process.roles=controller + ## + config: {} + ## @param controller.overrideConfiguration Kafka configuration override for Kafka controller-eligible nodes.
Values defined here takes precedence over the ones defined at `controller.config` + ## + overrideConfiguration: {} + ## @param controller.existingConfigmap Name of an existing ConfigMap with the Kafka configuration for Kafka controller-eligible nodes + ## + existingConfigmap: "" + ## @param controller.secretConfig Additional configuration to be appended at the end of the generated Kafka configuration for Kafka controller-eligible nodes (store in a secret) + ## + secretConfig: "" + ## @param controller.existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka configuration for Kafka controller-eligible nodes + ## The key for the configuration should be: server-secret.properties + ## NOTE: This will override secretConfig value + ## + existingSecretConfig: "" + ## @param controller.heapOpts Kafka Java Heap configuration for controller-eligible nodes + ## + heapOpts: -XX:InitialRAMPercentage=75 -XX:MaxRAMPercentage=75 + ## @param controller.command Override Kafka container command + ## + command: [] + ## @param controller.args Override Kafka container arguments + ## + args: [] + ## @param controller.extraEnvVars Extra environment variables to add to Kafka pods + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration + ## e.g: + ## extraEnvVars: + ## - name: KAFKA_CFG_BACKGROUND_THREADS + ## value: "10" + ## + extraEnvVars: [] + ## @param controller.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param controller.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param controller.extraContainerPorts Kafka controller-eligible extra containerPorts. 
+ ## + extraContainerPorts: [] + ## Configure extra options for Kafka containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param controller.livenessProbe.enabled Enable livenessProbe on Kafka containers + ## @param controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param controller.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param controller.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param controller.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + ## @param controller.readinessProbe.enabled Enable readinessProbe on Kafka containers + ## @param controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param controller.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param controller.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param controller.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + ## @param controller.startupProbe.enabled Enable startupProbe on Kafka containers + ## @param controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param controller.startupProbe.periodSeconds Period seconds for startupProbe + ## @param controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param 
controller.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param controller.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param controller.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param controller.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param controller.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param controller.lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## Kafka resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Kafka pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param controller.podSecurityContext.enabled Enable security context for the pods + ## @param controller.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param controller.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param controller.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param controller.podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup + ## @param controller.podSecurityContext.seccompProfile.type Set Kafka pods's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Kafka containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param controller.containerSecurityContext.enabled Enable Kafka containers' Security Context + ## @param controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param controller.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param controller.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param controller.containerSecurityContext.runAsGroup Set Kafka containers' 
Security Context runAsGroup + ## @param controller.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot + ## @param controller.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged + ## @param controller.containerSecurityContext.readOnlyRootFilesystem Allows the pod to mount the RootFS as ReadOnly only + ## @param controller.containerSecurityContext.capabilities.drop Set Kafka containers' server Security Context capabilities to be dropped + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + ## @param controller.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: true + ## @param controller.hostAliases Kafka pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param controller.hostNetwork Specify if host network should be enabled for Kafka pods + ## + hostNetwork: false + ## @param controller.hostIPC Specify if host IPC should be enabled for Kafka pods + ## + hostIPC: false + ## @param controller.podLabels Extra labels for Kafka pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param controller.podAnnotations Extra annotations for Kafka pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param controller.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. 
topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param controller.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param controller.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param controller.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param controller.nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param controller.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param controller.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param controller.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param controller.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param controller.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param controller.terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate + ## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution + ## + terminationGracePeriodSeconds: "" + ## @param controller.podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel + ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy + ## + podManagementPolicy: Parallel + ## @param controller.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update + ## + minReadySeconds: 0 + ## @param controller.priorityClassName Name of the existing priority class to be used by kafka pods + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param controller.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param controller.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## The environment variables injected by service links are not used, but can lead to slow kafka boot times or slow running of the scripts when there are many services in the current namespace. + ## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. 
+ ## + enableServiceLinks: true + ## @param controller.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param controller.updateStrategy.type Kafka statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param controller.extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param controller.sidecars Add additional sidecar containers to the Kafka pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param controller.initContainers Add additional init containers to the Kafka pod(s) + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @section Experimental: Kafka Controller Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param controller.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param controller.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param controller.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod 
autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param controller.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param controller.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param controller.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param controller.autoscaling.hpa.enabled Enable HPA for Kafka Controller + ## + enabled: false + ## @param controller.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param controller.autoscaling.hpa.minReplicas Minimum number of Kafka Controller replicas + ## + minReplicas: "" + ## @param controller.autoscaling.hpa.maxReplicas Maximum number of Kafka Controller replicas + ## + maxReplicas: "" + ## @param controller.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param controller.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + ## Kafka Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## @param controller.pdb.create Deploy a pdb object for the Kafka pod + ## @param controller.pdb.minAvailable Minimum number/percentage of available Kafka replicas + ## @param controller.pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param 
controller.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param controller.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param controller.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param controller.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param controller.persistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param controller.persistence.storageClass PVC Storage Class for Kafka data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param controller.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param controller.persistence.size PVC Storage Request for Kafka data volume + ## + size: 8Gi + ## @param controller.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param controller.persistence.labels Labels for the PVC + ## + labels: {} + ## @param controller.persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param controller.persistence.mountPath Mount path of the Kafka data volume + ## + mountPath: /drycc/kafka + ## Log Persistence parameters + ## + logPersistence: + ## @param controller.logPersistence.enabled Enable Kafka logs persistence using PVC + ## + enabled: false + ## @param controller.logPersistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param controller.logPersistence.storageClass PVC Storage Class for Kafka logs volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param controller.logPersistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param controller.logPersistence.size PVC Storage Request for Kafka logs volume + ## + size: 8Gi + ## @param controller.logPersistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param controller.logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param controller.logPersistence.mountPath Mount path of the Kafka logs volume + ## + mountPath: /opt/drycc/kafka/logs + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.replicaCount Number of Kafka broker-only nodes + ## + replicaCount: 0 + ## @param broker.minId Minimal node.id values for broker-only nodes. Do not change after first initialization. 
+ ## Broker-only nodes increment their ID starting at this minimal value. + ## We recommend setting this value high enough, as IDs under this value will be used by controller-eligible nodes + ## + ## + minId: 100 + ## @param broker.config Specify content for Kafka configuration for Kafka broker-only nodes (auto-generated based on other parameters otherwise) + ## NOTE: This will override the configuration based on values, please act carefully + ## Use simple key-value YAML format, then it's transformed to properties format by the chart. e.g: + ## process.roles: broker + ## ... will be transformed to: + ## process.roles=broker + ## + config: {} + ## @param broker.overrideConfiguration Kafka configuration override for Kafka broker-only nodes. Values defined here take precedence over the ones defined at `broker.config` + ## + overrideConfiguration: {} + ## @param broker.existingConfigmap Name of an existing ConfigMap with the Kafka configuration for Kafka broker-only nodes + ## + existingConfigmap: "" + ## @param broker.secretConfig Additional configuration to be appended at the end of the generated Kafka configuration for Kafka broker-only nodes (store in a secret) + ## + secretConfig: "" + ## @param broker.existingSecretConfig Secret with additional configuration that will be appended to the end of the generated Kafka configuration for Kafka broker-only nodes + ## The key for the configuration should be: server-secret.properties + ## NOTE: This will override secretConfig value + ## + existingSecretConfig: "" + ## @param broker.heapOpts Kafka Java Heap configuration for broker-only nodes + ## + heapOpts: -XX:InitialRAMPercentage=75 -XX:MaxRAMPercentage=75 + ## @param broker.command Override Kafka container command + ## + command: [] + ## @param broker.args Override Kafka container arguments + ## + args: [] + ## @param broker.extraEnvVars Extra environment variables to add to Kafka pods + ## ref: 
https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration + ## e.g: + ## extraEnvVars: + ## - name: KAFKA_CFG_BACKGROUND_THREADS + ## value: "10" + ## + extraEnvVars: [] + ## @param broker.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param broker.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param broker.extraContainerPorts Kafka broker-only extra containerPorts. + ## + extraContainerPorts: [] + ## Configure extra options for Kafka containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param broker.livenessProbe.enabled Enable livenessProbe on Kafka containers + ## @param broker.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param broker.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param broker.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param broker.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param broker.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + ## @param broker.readinessProbe.enabled Enable readinessProbe on Kafka containers + ## @param broker.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param broker.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param broker.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param broker.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param broker.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + 
timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + ## @param broker.startupProbe.enabled Enable startupProbe on Kafka containers + ## @param broker.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param broker.startupProbe.periodSeconds Period seconds for startupProbe + ## @param broker.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param broker.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param broker.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param broker.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param broker.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param broker.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param broker.lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## Kafka resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param broker.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Kafka pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param broker.podSecurityContext.enabled Enable security context for the pods + ## @param broker.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param broker.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param broker.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param broker.podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup + ## @param broker.podSecurityContext.seccompProfile.type Set Kafka pod's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Kafka containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param broker.containerSecurityContext.enabled Enable Kafka containers' Security Context + ## @param broker.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param broker.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param broker.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param broker.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot + ## @param 
broker.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged + ## @param broker.containerSecurityContext.readOnlyRootFilesystem Allows the pod to mount the RootFS as ReadOnly only + ## @param broker.containerSecurityContext.capabilities.drop Set Kafka containers' server Security Context capabilities to be dropped + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + ## @param broker.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: true + ## @param broker.hostAliases Kafka pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param broker.hostNetwork Specify if host network should be enabled for Kafka pods + ## + hostNetwork: false + ## @param broker.hostIPC Specify if host IPC should be enabled for Kafka pods + ## + hostIPC: false + ## @param broker.podLabels Extra labels for Kafka pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param broker.podAnnotations Extra annotations for Kafka pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param broker.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param broker.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param broker.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param broker.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param broker.nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param broker.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param broker.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param broker.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param broker.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param broker.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param broker.terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate + ## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution + ## + terminationGracePeriodSeconds: "" + ## @param broker.podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel + ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy + ## + podManagementPolicy: Parallel + ## @param broker.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update + ## + minReadySeconds: 0 + ## @param broker.priorityClassName Name of the existing priority class to be used by kafka pods + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param broker.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param broker.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## The environment variables injected by service links are not used, but can lead to slow kafka boot times or slow running of the scripts when there are many services in the current namespace. + ## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. 
+ ## + enableServiceLinks: true + ## @param broker.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param broker.updateStrategy.type Kafka statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param broker.extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param broker.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param broker.sidecars Add additional sidecar containers to the Kafka pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param broker.initContainers Add additional init containers to the Kafka pod(s) + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## Kafka Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## @param broker.pdb.create Deploy a pdb object for the Kafka pod + ## @param broker.pdb.minAvailable Minimum number/percentage of available Kafka replicas + ## @param broker.pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @section Experimental: Kafka Broker Autoscaling configuration + 
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param broker.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param broker.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param broker.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param broker.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param broker.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param broker.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updateMode: Auto + hpa: + ## @param broker.autoscaling.hpa.enabled Enable HPA for Kafka Broker + ## + enabled: false + ## @param broker.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param broker.autoscaling.hpa.minReplicas Minimum number of Kafka Broker replicas + ## + minReplicas: "" + ## @param broker.autoscaling.hpa.maxReplicas Maximum number of Kafka Broker replicas + ## + maxReplicas: "" + ## @param broker.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param broker.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param broker.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param broker.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param broker.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: true + whenScaled: Retain + whenDeleted: Delete + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param broker.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param broker.persistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param broker.persistence.storageClass PVC Storage Class for Kafka data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables 
dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param broker.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param broker.persistence.size PVC Storage Request for Kafka data volume + ## + size: 8Gi + ## @param broker.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param broker.persistence.labels Labels for the PVC + ## + labels: {} + ## @param broker.persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param broker.persistence.mountPath Mount path of the Kafka data volume + ## + mountPath: /drycc/kafka + ## Log Persistence parameters + ## + logPersistence: + ## @param broker.logPersistence.enabled Enable Kafka logs persistence using PVC + ## + enabled: false + ## @param broker.logPersistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param broker.logPersistence.storageClass PVC Storage Class for Kafka logs volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + storageClass: "" + ## @param broker.logPersistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param broker.logPersistence.size PVC Storage Request for Kafka logs volume + ## + size: 8Gi + ## @param broker.logPersistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param broker.logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param broker.logPersistence.mountPath Mount path of the Kafka logs volume + ## + mountPath: /opt/drycc/kafka/logs + +## @section Traffic Exposure parameters +## + +## Service parameters +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client Kafka svc port for client connections + ## @param service.ports.controller Kafka svc port for controller connections + ## @param service.ports.interbroker Kafka svc port for inter-broker connections + ## @param service.ports.external Kafka svc port for external connections + ## + ports: + client: 9092 + controller: 9093 + interbroker: 9094 + external: 9095 + ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.nodePorts.client Node port for the Kafka client connections + ## @param service.nodePorts.external Node port for the Kafka external connections + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + client: "" + external: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## 
clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP Kafka service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Kafka service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerClass Kafka service Load Balancer Class + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.allocateLoadBalancerNodePorts Whether to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param service.externalTrafficPolicy Kafka service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Kafka service + ## + annotations: {} + ## Headless service properties + ## + headless: + controller: + ## @param service.headless.controller.annotations Annotations for the controller-eligible headless service. + ## + annotations: {} + ## @param service.headless.controller.labels Labels for the controller-eligible headless service. + ## + labels: {} + broker: + ## @param service.headless.broker.annotations Annotations for the broker-only headless service. 
+ ## + annotations: {} + ## @param service.headless.broker.labels Labels for the broker-only headless service. + ## + labels: {} + ## @param service.headless.ipFamilies IP families for the headless service + ## + ipFamilies: [] + ## @param service.headless.ipFamilyPolicy IP family policy for the headless service + ## + ipFamilyPolicy: "" +## External Access to Kafka brokers configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + ## Service settings + controller: + ## @param externalAccess.controller.forceExpose If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes + ## + forceExpose: false + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## Note: A new service per broker will be created + ## + service: + ## @param externalAccess.controller.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.controller.service.ports.external Kafka port used for external access when service type is LoadBalancer + ## + ports: + external: 9094 + ## @param externalAccess.controller.service.loadBalancerClass Kubernetes Service Load Balancer class for external access when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param externalAccess.controller.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.controller.service.loadBalancerNames Array of load balancer Names for each Kafka broker. 
Length must be the same as replicaCount + ## e.g: + ## loadBalancerNames: + ## - broker1.external.example.com + ## - broker2.external.example.com + ## + loadBalancerNames: [] + ## @param externalAccess.controller.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerAnnotations: + ## - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com. + ## - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com. + ## + loadBalancerAnnotations: [] + ## @param externalAccess.controller.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.controller.service.allocateLoadBalancerNodePorts Whether to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param externalAccess.controller.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.controller.service.externalIPs Use distinct service host IPs to configure Kafka external listener when service type is NodePort. 
Length must be the same as replicaCount + ## e.g: + ## externalIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + externalIPs: [] + ## @param externalAccess.controller.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort + ## + useHostIPs: false + ## @param externalAccess.controller.service.usePodIPs using the MY_POD_IP address for external access. + ## + usePodIPs: false + ## @param externalAccess.controller.service.domain Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP + ## NodePort: If not specified, the container will try to get the kubernetes node external IP + ## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured + ## + domain: "" + ## @param externalAccess.controller.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + ## + publishNotReadyAddresses: false + ## @param externalAccess.controller.service.labels Service labels for external access + ## + labels: {} + ## @param externalAccess.controller.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.controller.service.extraPorts Extra ports to expose in the Kafka external service + ## + extraPorts: [] + ## @param externalAccess.controller.service.ipFamilies IP families for the external controller service + ## + ipFamilies: [] + ## @param externalAccess.controller.service.ipFamilyPolicy IP family policy for the external controller service + ## + ipFamilyPolicy: "" + broker: + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## Note: A new service per broker will be created + ## + service: + ## @param externalAccess.broker.service.type Kubernetes Service type for external access. 
It can be NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.broker.service.ports.external Kafka port used for external access when service type is LoadBalancer + ## + ports: + external: 9094 + ## @param externalAccess.broker.service.loadBalancerClass Kubernetes Service Load Balancer class for external access when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param externalAccess.broker.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.broker.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerNames: + ## - broker1.external.example.com + ## - broker2.external.example.com + ## + loadBalancerNames: [] + ## @param externalAccess.broker.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerAnnotations: + ## - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com. + ## - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com. 
+ ## + loadBalancerAnnotations: [] + ## @param externalAccess.broker.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.broker.service.allocateLoadBalancerNodePorts Whether to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param externalAccess.broker.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.broker.service.externalIPs Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount + ## e.g: + ## externalIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + externalIPs: [] + ## @param externalAccess.broker.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort + ## + useHostIPs: false + ## @param externalAccess.broker.service.usePodIPs using the MY_POD_IP address for external access. 
+ ## + usePodIPs: false + ## @param externalAccess.broker.service.domain Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP + ## NodePort: If not specified, the container will try to get the kubernetes node external IP + ## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured + ## + domain: "" + ## @param externalAccess.broker.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + ## + publishNotReadyAddresses: false + ## @param externalAccess.broker.service.labels Service labels for external access + ## + labels: {} + ## @param externalAccess.broker.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.broker.service.extraPorts Extra ports to expose in the Kafka external service + ## + extraPorts: [] + ## @param externalAccess.broker.service.ipFamilies IP families for the external broker service + ## + ipFamilies: [] + ## @param externalAccess.broker.service.ipFamilyPolicy IP family policy for the external broker service + ## + ipFamilyPolicy: "" +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port Kafka is + ## listening on. When true, Kafka accept connections from any source (with the correct destination port). + ## + allowExternal: false + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param networkPolicy.addExternalClientAccess Allow access from pods with client label set to "true". Ignored if `networkPolicy.allowExternal` is true. + ## + addExternalClientAccess: true + + allowCurrentNamespace: true + allowNamespaces: [] + + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressPodMatchLabels [object] Labels to match to allow traffic from other pods. Ignored if `networkPolicy.allowExternal` is true. + ## e.g: + ## ingressPodMatchLabels: + ## my-client: "true" + # + ingressPodMatchLabels: {} + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Other Parameters + +## ServiceAccount for Kafka +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods + ## + create: true + ## @param serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.serviceAccountName template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Role Based Access Control +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: true + +## @section Metrics parameters + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus JMX exporter: exposes the majority of Kafka metrics + ## + jmx: + ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus + ## + enabled: false + ## @param metrics.jmx.kafkaJmxPort JMX port where the exporter will collect metrics, exposed in the Kafka container. + ## + kafkaJmxPort: 5555 + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## @param metrics.jmx.image.registry [default: REGISTRY_NAME] JMX exporter image registry + ## @param metrics.jmx.image.repository [default: REPOSITORY_NAME/jmx-exporter] JMX exporter image repository + ## @skip metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended) + ## @param metrics.jmx.image.digest JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy + ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: registry.drycc.cc + repository: drycc-addons/jmx-exporter + tag: "1" + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Prometheus JMX exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context + ## @param metrics.jmx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param metrics.jmx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot + ## @param metrics.jmx.containerSecurityContext.allowPrivilegeEscalation Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation + ## @param metrics.jmx.containerSecurityContext.readOnlyRootFilesystem Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem + ## @param metrics.jmx.containerSecurityContext.capabilities.drop Set Prometheus JMX exporter containers' Security Context capabilities to be dropped + ## e.g: + ## 
containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port + ## + containerPorts: + metrics: 5556 + ## Prometheus JMX exporter resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param metrics.jmx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "micro" + ## @param metrics.jmx.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param metrics.jmx.livenessProbe.enabled Enable livenessProbe + ## @param metrics.jmx.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.jmx.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.jmx.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.jmx.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.jmx.livenessProbe.successThreshold Success 
threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param metrics.jmx.readinessProbe.enabled Enable readinessProbe + ## @param metrics.jmx.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.jmx.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.jmx.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.jmx.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.jmx.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + ## Prometheus JMX exporter service configuration + ## + service: + ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port + ## + ports: + metrics: 5556 + ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}" + prometheus.io/path: "/metrics" + ## @param metrics.jmx.service.ipFamilies IP families for 
the jmx metrics service + ## + ipFamilies: [] + ## @param metrics.jmx.service.ipFamilyPolicy IP family policy for the jmx metrics service + ## + ipFamilyPolicy: "" + ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter + ## Only whitelisted values will be exposed via JMX exporter. They must also be exposed via Rules. To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + ## @param metrics.jmx.config [string] Configuration file for JMX exporter + ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:{{ .Values.metrics.jmx.kafkaJmxPort }}/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + ## @param metrics.jmx.existingConfigmap Name of existing ConfigMap with JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + existingConfigmap: "" + ## @param metrics.jmx.extraRules Add extra rules to JMX exporter configuration + ## e.g: + ## extraRules: |- + ## - pattern: kafka.server<>(connection-count) + ## name: kafka_server_socket_server_metrics_$3 + ## labels: + ## listener: $1 + ## + extraRules: "" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.jmx.enabled` to be `true`) + ## 
+ enabled: false + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.serviceMonitor.path Path where JMX exporter serves metrics + ## + path: /metrics + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + prometheusRule: + ## @param metrics.prometheusRule.enabled if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.jmx.enabled` to be `true`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.groups Prometheus Rule Groups for Kafka + ## + groups: [] + +## @section Kafka provisioning parameters +## + +## Kafka provisioning +## +provisioning: + ## @param provisioning.enabled Enable Kafka provisioning Job + ## + enabled: false + ## @param provisioning.waitForKafka Whether an init container should be created to wait until Kafka is ready before provisioning + ## + waitForKafka: true + ## @param provisioning.useHelmHooks Flag to indicate usage of helm hooks + ## + useHelmHooks: true + ## @param provisioning.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param provisioning.numPartitions Default number of partitions for topics when unspecified + ## + numPartitions: 1 + ## @param provisioning.replicationFactor Default replication factor for topics when unspecified + ## + replicationFactor: 1 + ## @param provisioning.topics Kafka topics to provision + ## - name: topic-name + ## partitions: 1 + ## replicationFactor: 1 + ## ## https://kafka.apache.org/documentation/#topicconfigs + ## config: + ## max.message.bytes: 64000 + ## flush.messages: 1 + ## + topics: [] + ## @param provisioning.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param provisioning.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param 
provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources + ## - echo "Allow user to consume from any topic" + ## - >- + ## /opt/drycc/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config /shared/client.properties + ## --add + ## --allow-principal User:user + ## --consumer --topic * + ## - "/opt/drycc/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config /shared/client.properties + ## --list" + ## + extraProvisioningCommands: [] + ## @param provisioning.parallel Number of provisioning commands to run at the same time + ## + parallel: 1 + ## @param provisioning.preScript Extra bash script to run before topic provisioning. /shared/client.properties is path to properties file with most needed configurations + ## + preScript: "" + ## @param provisioning.postScript Extra bash script to run after topic provisioning. /shared/client.properties is path to properties file with most needed configurations + ## + postScript: "" + ## Auth Configuration for kafka provisioning Job + ## + auth: + ## TLS configuration for kafka provisioning Job + ## + tls: + ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. + ## Note: ignored if auth.tls.client.protocol different from one of these values: "SSL" "SASL_SSL" + ## + type: jks + ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job. + ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore. + ## When using 'pem' format for certificates, the secret should contain one of the following: + ## 1. A public CA certificate, a public certificate and one private key. + ## 2. A truststore and a keystore in PEM format + ## If caCert is set, option 1 will be taken, otherwise option 2. 
+ ## + certificatesSecret: "" + ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) + ## + cert: tls.crt + ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key) + ## + key: tls.key + ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) + ## + caCert: ca.crt + ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) + ## + keystore: keystore.jks + ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) + ## + truststore: truststore.jks + ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. + ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key. + ## + passwordsSecret: "" + ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keyPasswordSecretKey: key-password + ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) + ## Note: must not be used if `passwordsSecret` is not defined. 
+ ## + keystorePasswordSecretKey: keystore-password + ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + truststorePasswordSecretKey: truststore-password + ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. + ## + keyPassword: "" + ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. + ## + keystorePassword: "" + ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. + ## + truststorePassword: "" + ## @param provisioning.command Override provisioning container command + ## + command: [] + ## @param provisioning.args Override provisioning container arguments + ## + args: [] + ## @param provisioning.extraEnvVars Extra environment variables to add to the provisioning pod + ## e.g: + ## extraEnvVars: + ## - name: KAFKA_CFG_BACKGROUND_THREADS + ## value: "10" + ## + extraEnvVars: [] + ## @param provisioning.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param provisioning.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods + ## + podAnnotations: {} + ## @param provisioning.podLabels Extra labels for Kafka provisioning pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## Kafka provisioning pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param provisioning.serviceAccount.create Enable creation of 
ServiceAccount for Kafka provisioning pods + ## + create: true + ## @param provisioning.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the provisioning.serviceAccount.name template + ## + name: "" + ## @param provisioning.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## Kafka provisioning resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param provisioning.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "micro" + ## @param provisioning.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Kafka provisioning pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable security context for the pods + ## @param provisioning.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param provisioning.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param provisioning.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup + ## @param provisioning.podSecurityContext.seccompProfile.type Set Kafka provisioning pod's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Kafka provisioning containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context + ## @param provisioning.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param provisioning.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + 
## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot + ## @param provisioning.containerSecurityContext.allowPrivilegeEscalation Set Kafka provisioning containers' Security Context allowPrivilegeEscalation + ## @param provisioning.containerSecurityContext.readOnlyRootFilesystem Set Kafka provisioning containers' Security Context readOnlyRootFilesystem + ## @param provisioning.containerSecurityContext.capabilities.drop Set Kafka provisioning containers' Security Context capabilities to be dropped + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param provisioning.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## The environment variables injected by service links are not used, but can lead to slow kafka boot times or slow running of the scripts when there are many services in the current namespace. + ## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. 
+ ## + enableServiceLinks: true + ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param provisioning.initContainers Add additional Add init containers to the Kafka provisioning pod(s) + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] diff --git a/addons/kafka/4.2/meta.yaml b/addons/kafka/4.2/meta.yaml new file mode 100644 index 00000000..d1691c4a --- /dev/null +++ b/addons/kafka/4.2/meta.yaml @@ -0,0 +1,99 @@ +name: kafka-4.2 +version: 4.2 +id: 0ff645c0-54af-4896-80d6-34ae0cb0ca7d +description: "kafka-4.2" +displayName: "kafka-4.2" +metadata: + displayName: "kafka-4.2" + provider: + name: drycc + supportURL: https://kafka.apache.org/ + documentationURL: https://github.com/drycc-addons/containers/tree/main/containers/kafka +tags: kafka +bindable: true +instances_retrievable: true +bindings_retrievable: true +plan_updateable: true +allow_parameters: +- name: "extraEnvVars" + required: false + description: "extraEnvVars config for values.yaml" +- name: "extraConfig" + required: false + description: "extraConfig config for values.yaml" +- 
name: "controller.extraConfig" + required: false + description: "controller.extraConfig config for values.yaml" +- name: "controller.extraEnvVars" + required: false + description: "controller.extraEnvVars config for values.yaml" +- name: "controller.heapOpts" + required: false + description: "controller.heapOpts config for values.yaml" +- name: "controller.replicaCount" + required: false + description: "controller.replicaCount config for values.yaml" +- name: "controller.controllerOnly" + required: false + description: "controller.controllerOnly config for values.yaml" +- name: "controller.nodeSelector" + required: false + description: "controller.nodeSelector config for values.yaml" +- name: "controller.persistence" + required: false + description: "controller.persistence config for values.yaml" +- name: "broker.replicaCount" + required: false + description: "broker.replicaCount config for values.yaml" +- name: "broker.extraConfig" + required: false + description: "broker.extraConfig config for values.yaml" +- name: "broker.extraEnvVars" + required: false + description: "broker.extraEnvVars config for values.yaml" +- name: "broker.heapOpts" + required: false + description: "broker.heapOpts config for values.yaml" +- name: "broker.nodeSelector" + required: false + description: "broker.nodeSelector config for values.yaml" +- name: "broker.persistence" + required: false + description: "broker.persistence config for values.yaml" +- name: "listeners.client.protocol" + required: false + description: "listeners client protocol config for values.yaml" +- name: "listeners.controller.protocol" + required: false + description: "listeners controller protocol config for values.yaml" +- name: "listeners.interbroker.protocol" + required: false + description: "listeners interbroker protocol config for values.yaml" +- name: "listeners.external.protocol" + required: false + description: "listeners external protocol config for values.yaml" +- name: "sasl" + required: false + 
description: "sasl config for values.yaml" +- name: "networkPolicy.allowNamespaces" + required: false + description: "networkPolicy allowNamespaces config for values.yaml" +- name: "service.type" + required: false + description: "service type config for values.yaml" +- name: "service.externalTrafficPolicy" + required: false + description: "service externalTrafficPolicy config for values.yaml" +- name: "externalAccess.enabled" + required: false + description: "externalAccess enabled or not config for values.yaml" +- name: "listeners.advertisedListeners" + required: false + description: "listeners advertisedListeners or not config for values.yaml" +- name: "metrics.jmx.enabled" + required: false + description: "metrics jmx enabled or not config for values.yaml" +- name: "metrics.nodeSelector" + required: false + description: "metrics nodeSelector config for values.yaml" +archive: false diff --git a/addons/kafka/4.2/plans/standard-16c32g3w/bind.yaml b/addons/kafka/4.2/plans/standard-16c32g3w/bind.yaml new file mode 100644 index 00000000..3d89e772 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-16c32g3w/bind.yaml @@ -0,0 +1,109 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} +credential: + - name: PROTOCOL_MAP + value: {{ include "kafka.securityProtocolMap" (dict "isController" false "context" .) }} + + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . 
}} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + {{- $brokerList := list }} + {{- if .Values.controller.controllerOnly }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $broker }} + {{- end }} + + {{- $replicaCount := int .Values.broker.replicaCount }} + {{ if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + {{ if (include "kafka.client.saslEnabled" .) 
}} + - name: CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + + - name: CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} + jsonpath: '{ .data.client-passwords }' + {{- end }} + {{- end }} + + + {{- if .Values.externalAccess.enabled }} + {{- if .Values.controller.controllerOnly }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/plans/standard-16c32g3w/instance-schema.json b/addons/kafka/4.2/plans/standard-16c32g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-16c32g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": ["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/kafka/4.2/plans/standard-16c32g3w/meta.yaml 
b/addons/kafka/4.2/plans/standard-16c32g3w/meta.yaml new file mode 100644 index 00000000..ed8d3d81 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-16c32g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-16c32g3w" +id: 506965ca-6d49-45ca-90aa-86fba579150b +description: "Kafka standard-16c32g3w plan which limit resources 3 brokers per broker 16 cores, memory size 32Gi persistence size 768Gi." +displayName: "standard-16c32g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kafka/4.2/plans/standard-16c32g3w/values.yaml b/addons/kafka/4.2/plans/standard-16c32g3w/values.yaml new file mode 100644 index 00000000..c670af8e --- /dev/null +++ b/addons/kafka/4.2/plans/standard-16c32g3w/values.yaml @@ -0,0 +1,57 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kafka-standard-16c32g3w + +## @section Controller-eligible statefulset parameters +## +controller: + ## @param controller.replicaCount Number of Kafka controller-eligible nodes + ## + replicaCount: 3 + ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. 
+ ## + controllerOnly: true + ## + ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 16 + memory: 32Gi + requests: + cpu: 2 + memory: 4Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param controller.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param controller.persistence.size PVC Storage Request for Kafka data volume + ## + size: 64Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.replicaCount Number of Kafka broker-only nodes + ## + replicaCount: 3 + ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 16 + memory: 32Gi + requests: + cpu: 2 + memory: 4Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param broker.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param broker.persistence.size PVC Storage Request for Kafka data volume + ## + size: 768Gi diff --git a/addons/kafka/4.2/plans/standard-1c2g3w/bind.yaml b/addons/kafka/4.2/plans/standard-1c2g3w/bind.yaml new file mode 100644 index 00000000..3d89e772 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-1c2g3w/bind.yaml @@ -0,0 +1,109 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} +credential: + - name: PROTOCOL_MAP + value: {{ include "kafka.securityProtocolMap" (dict "isController" false "context" .) 
}} + + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + {{- $brokerList := list }} + {{- if .Values.controller.controllerOnly }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $broker }} + {{- end }} + + {{- $replicaCount := int .Values.broker.replicaCount }} + {{ if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + {{ if (include "kafka.client.saslEnabled" .) }} + - name: CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + + - name: CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} + jsonpath: '{ .data.client-passwords }' + {{- end }} + {{- end }} + + + {{- if .Values.externalAccess.enabled }} + {{- if .Values.controller.controllerOnly }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/plans/standard-1c2g3w/instance-schema.json b/addons/kafka/4.2/plans/standard-1c2g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-1c2g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": 
["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/kafka/4.2/plans/standard-1c2g3w/meta.yaml b/addons/kafka/4.2/plans/standard-1c2g3w/meta.yaml new file mode 100644 index 00000000..1e28c8d7 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-1c2g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-1c2g3w" +id: 83ca1043-39cb-4508-8723-0977e38a8882 +description: "Kafka standard-1c2g3w plan which limit resources 3 brokers per broker 1 core, memory size 2Gi and persistence size 16Gi." +displayName: "standard-1c2g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kafka/4.2/plans/standard-1c2g3w/values.yaml b/addons/kafka/4.2/plans/standard-1c2g3w/values.yaml new file mode 100644 index 00000000..bff6b7e8 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-1c2g3w/values.yaml @@ -0,0 +1,57 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "hb-kafka-standard-1c2g3w" + +## @section Controller-eligible statefulset parameters +## +controller: + ## @param controller.replicaCount Number of Kafka controller-eligible nodes + ## + replicaCount: 3 + ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. 
+ ## + controllerOnly: true + ## + ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param controller.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param controller.persistence.size PVC Storage Request for Kafka data volume + ## + size: 8Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.replicaCount Number of Kafka broker-only nodes + ## + replicaCount: 3 + ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param broker.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param broker.persistence.size PVC Storage Request for Kafka data volume + ## + size: 16Gi diff --git a/addons/kafka/4.2/plans/standard-24c64g3w/bind.yaml b/addons/kafka/4.2/plans/standard-24c64g3w/bind.yaml new file mode 100644 index 00000000..3d89e772 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-24c64g3w/bind.yaml @@ -0,0 +1,109 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} +credential: + - name: PROTOCOL_MAP + value: {{ include "kafka.securityProtocolMap" (dict "isController" false "context" .) 
}} + + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + {{- $brokerList := list }} + {{- if .Values.controller.controllerOnly }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $broker }} + {{- end }} + + {{- $replicaCount := int .Values.broker.replicaCount }} + {{ if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + {{ if (include "kafka.client.saslEnabled" .) }} + - name: CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + + - name: CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} + jsonpath: '{ .data.client-passwords }' + {{- end }} + {{- end }} + + + {{- if .Values.externalAccess.enabled }} + {{- if .Values.controller.controllerOnly }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/plans/standard-24c64g3w/instance-schema.json b/addons/kafka/4.2/plans/standard-24c64g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-24c64g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": 
["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/kafka/4.2/plans/standard-24c64g3w/meta.yaml b/addons/kafka/4.2/plans/standard-24c64g3w/meta.yaml new file mode 100644 index 00000000..7bbadab1 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-24c64g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-24c64g3w" +id: 30259029-8e15-4dd7-97f9-f7811a6263a4 +description: "Kafka standard-24c64g3w plan which limit resources 3 brokers per broker 24 cores, memory size 64Gi persistence size 1Ti." +displayName: "standard-24c64g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kafka/4.2/plans/standard-24c64g3w/values.yaml b/addons/kafka/4.2/plans/standard-24c64g3w/values.yaml new file mode 100644 index 00000000..c5f9a37b --- /dev/null +++ b/addons/kafka/4.2/plans/standard-24c64g3w/values.yaml @@ -0,0 +1,57 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kafka-standard-24c64g3w + +## @section Controller-eligible statefulset parameters +## +controller: + ## @param controller.replicaCount Number of Kafka controller-eligible nodes + ## + replicaCount: 3 + ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. 
+ ## + controllerOnly: true + ## + ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param controller.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param controller.persistence.size PVC Storage Request for Kafka data volume + ## + size: 128Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.replicaCount Number of Kafka broker-only nodes + ## + replicaCount: 3 + ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 24 + memory: 64Gi + requests: + cpu: 3 + memory: 8Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param broker.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param broker.persistence.size PVC Storage Request for Kafka data volume + ## + size: 1Ti diff --git a/addons/kafka/4.2/plans/standard-2c4g3w/bind.yaml b/addons/kafka/4.2/plans/standard-2c4g3w/bind.yaml new file mode 100644 index 00000000..3d89e772 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-2c4g3w/bind.yaml @@ -0,0 +1,109 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} +credential: + - name: PROTOCOL_MAP + value: {{ include "kafka.securityProtocolMap" (dict "isController" false "context" .) 
}} + + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + {{- $brokerList := list }} + {{- if .Values.controller.controllerOnly }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $broker }} + {{- end }} + + {{- $replicaCount := int .Values.broker.replicaCount }} + {{ if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + {{ if (include "kafka.client.saslEnabled" .) }} + - name: CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + + - name: CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} + jsonpath: '{ .data.client-passwords }' + {{- end }} + {{- end }} + + + {{- if .Values.externalAccess.enabled }} + {{- if .Values.controller.controllerOnly }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/plans/standard-2c4g3w/instance-schema.json b/addons/kafka/4.2/plans/standard-2c4g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-2c4g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": 
["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/kafka/4.2/plans/standard-2c4g3w/meta.yaml b/addons/kafka/4.2/plans/standard-2c4g3w/meta.yaml new file mode 100644 index 00000000..0fac07fb --- /dev/null +++ b/addons/kafka/4.2/plans/standard-2c4g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-2c4g3w" +id: 36cde06c-b56e-4511-9462-0c5b9c9638c0 +description: "Kafka standard-2c4g3w plan which limit resources 3 brokers per broker 2 cores, memory size 4Gi and persistence size 16Gi." +displayName: "standard-2c4g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kafka/4.2/plans/standard-2c4g3w/values.yaml b/addons/kafka/4.2/plans/standard-2c4g3w/values.yaml new file mode 100644 index 00000000..a28c9502 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-2c4g3w/values.yaml @@ -0,0 +1,57 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kafka-standard-2c4g3w + +## @section Controller-eligible statefulset parameters +## +controller: + ## @param controller.replicaCount Number of Kafka controller-eligible nodes + ## + replicaCount: 3 + ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. 
+ ## + controllerOnly: true + ## + ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param controller.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param controller.persistence.size PVC Storage Request for Kafka data volume + ## + size: 8Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.replicaCount Number of Kafka broker-only nodes + ## + replicaCount: 3 + ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 400m + memory: 512Mi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param broker.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param broker.persistence.size PVC Storage Request for Kafka data volume + ## + size: 16Gi diff --git a/addons/kafka/4.2/plans/standard-4c8g3w/bind.yaml b/addons/kafka/4.2/plans/standard-4c8g3w/bind.yaml new file mode 100644 index 00000000..3d89e772 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-4c8g3w/bind.yaml @@ -0,0 +1,109 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} +credential: + - name: PROTOCOL_MAP + value: {{ include "kafka.securityProtocolMap" (dict "isController" false "context" .) 
}} + + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + {{- $brokerList := list }} + {{- if .Values.controller.controllerOnly }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $broker }} + {{- end }} + + {{- $replicaCount := int .Values.broker.replicaCount }} + {{ if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + {{ if (include "kafka.client.saslEnabled" .) }} + - name: CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + + - name: CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} + jsonpath: '{ .data.client-passwords }' + {{- end }} + {{- end }} + + + {{- if .Values.externalAccess.enabled }} + {{- if .Values.controller.controllerOnly }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/plans/standard-4c8g3w/instance-schema.json b/addons/kafka/4.2/plans/standard-4c8g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-4c8g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": 
["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/kafka/4.2/plans/standard-4c8g3w/meta.yaml b/addons/kafka/4.2/plans/standard-4c8g3w/meta.yaml new file mode 100644 index 00000000..0541498f --- /dev/null +++ b/addons/kafka/4.2/plans/standard-4c8g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-4c8g3w" +id: db8fc3e0-44d9-40b4-88ad-d94133e2f746 +description: "Kafka standard-4c8g3w plan which limit resources 3 brokers per broker 4 cores, memory size 8Gi and persistence size 256Gi." +displayName: "standard-4c8g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kafka/4.2/plans/standard-4c8g3w/values.yaml b/addons/kafka/4.2/plans/standard-4c8g3w/values.yaml new file mode 100644 index 00000000..34402a8c --- /dev/null +++ b/addons/kafka/4.2/plans/standard-4c8g3w/values.yaml @@ -0,0 +1,57 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kafka-standard-4c8g3w + +## @section Controller-eligible statefulset parameters +## +controller: + ## @param controller.replicaCount Number of Kafka controller-eligible nodes + ## + replicaCount: 3 + ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. 
+ ## + controllerOnly: true + ## + ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param controller.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param controller.persistence.size PVC Storage Request for Kafka data volume + ## + size: 32Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.replicaCount Number of Kafka broker-only nodes + ## + replicaCount: 3 + ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 4 + memory: 8Gi + requests: + cpu: 500m + memory: 1Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param broker.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param broker.persistence.size PVC Storage Request for Kafka data volume + ## + size: 256Gi diff --git a/addons/kafka/4.2/plans/standard-8c16g3w/bind.yaml b/addons/kafka/4.2/plans/standard-8c16g3w/bind.yaml new file mode 100644 index 00000000..3d89e772 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-8c16g3w/bind.yaml @@ -0,0 +1,109 @@ +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $clientPort := int .Values.listeners.client.containerPort }} +credential: + - name: PROTOCOL_MAP + value: {{ include "kafka.securityProtocolMap" (dict "isController" false "context" .) 
}} + + - name: CLIENT_DOMAIN + value: {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + - name: CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.clusterIP }' + + - name: CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + + {{- if (eq .Values.service.type "LoadBalancer") }} + - name: EXTERNAL_CLIENT_HOST + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + + - name: EXTERNAL_CLIENT_PORT + valueFrom: + serviceRef: + name: {{ template "common.names.fullname" . }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + {{- $brokerList := list }} + {{- if .Values.controller.controllerOnly }} + {{- range $i := until (int .Values.broker.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-broker-%d.%s-broker-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- else }} + {{- range $i := until (int .Values.controller.replicaCount) }} + {{- $brokerList = append $brokerList (printf "%s-controller-%d.%s-controller-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain ) }} + {{- end }} + {{- end }} + {{- range $i, $broker := ( $brokerList )}} + - name: {{ printf "KAFKA_NODE_%d" $i }} + value: {{ $broker }} + {{- end }} + + {{- $replicaCount := int .Values.broker.replicaCount }} + {{ if gt $replicaCount 0 }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-broker-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- else }} + - name: KAFKA_NODE_PORT + valueFrom: + serviceRef: + name: {{ printf "%s-controller-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + jsonpath: '{ .spec.ports[?(@.name=="tcp-client")].port }' + {{- end }} + + + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} + {{ if (include "kafka.client.saslEnabled" .) }} + - name: CLIENT_USERS + value: {{ join "," .Values.sasl.client.users | quote }} + + - name: CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ printf "%s-user-passwords" (include "common.names.fullname" .) }} + jsonpath: '{ .data.client-passwords }' + {{- end }} + {{- end }} + + + {{- if .Values.externalAccess.enabled }} + {{- if .Values.controller.controllerOnly }} + {{- $replicaCount := .Values.broker.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-broker-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.broker.service.ports.external }} + {{- else }} + {{- $replicaCount := .Values.controller.replicaCount | int }} + {{- range $i := until $replicaCount }} + - name: {{ printf "EXTERNAL_KAFKA_NODE_%d" $i }} + valueFrom: + serviceRef: + name: {{ printf "%s-controller-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + jsonpath: '{ .status.loadBalancer.ingress[*].ip }' + {{- end }} + + - name: EXTERNAL_KAFKA_NODE_PORT + value: {{ .Values.externalAccess.controller.service.ports.external }} + {{- end }} +{{- end }} diff --git a/addons/kafka/4.2/plans/standard-8c16g3w/instance-schema.json b/addons/kafka/4.2/plans/standard-8c16g3w/instance-schema.json new file mode 100644 index 00000000..66ebbaa0 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-8c16g3w/instance-schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "imagePullPolicy": { + "type": "string", + "enum": 
["Always", "IfNotPresent", "Never"], + "default": "IfNotPresent", + "title": "Image pull policy" + } + } +} \ No newline at end of file diff --git a/addons/kafka/4.2/plans/standard-8c16g3w/meta.yaml b/addons/kafka/4.2/plans/standard-8c16g3w/meta.yaml new file mode 100644 index 00000000..f4551215 --- /dev/null +++ b/addons/kafka/4.2/plans/standard-8c16g3w/meta.yaml @@ -0,0 +1,6 @@ +name: "standard-8c16g3w" +id: 5607275a-f3ad-42b3-9d10-ffae06c80079 +description: "Kafka standard-8c16g3w plan which limit resources 3 brokers per broker 8 cores, memory size 16Gi and persistence size 512Gi." +displayName: "standard-8c16g3w" +bindable: true +maximum_polling_duration: 1800 diff --git a/addons/kafka/4.2/plans/standard-8c16g3w/values.yaml b/addons/kafka/4.2/plans/standard-8c16g3w/values.yaml new file mode 100644 index 00000000..a082590a --- /dev/null +++ b/addons/kafka/4.2/plans/standard-8c16g3w/values.yaml @@ -0,0 +1,59 @@ +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: hb-kafka-standard-8c16g3w + +## @section Controller-eligible statefulset parameters +## +controller: + ## @param controller.replicaCount Number of Kafka controller-eligible nodes + ## + replicaCount: 3 + ## @param controller.controllerOnly If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. 
+ ## + controllerOnly: true + ## + ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 8 + memory: 16Gi + requests: + cpu: 1 + memory: 2Gi + + + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param controller.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param controller.persistence.size PVC Storage Request for Kafka data volume + ## + size: 32Gi + +## @section Broker-only statefulset parameters +## +broker: + ## @param broker.replicaCount Number of Kafka broker-only nodes + ## + replicaCount: 3 + ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + limits: + cpu: 8 + memory: 16Gi + requests: + cpu: 1 + memory: 2Gi + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ + ## + persistence: + ## @param broker.persistence.enabled Enable Kafka data persistence using PVC + ## + enabled: true + ## @param broker.persistence.size PVC Storage Request for Kafka data volume + ## + size: 512Gi From 746ec5e5bdda94846cbf99e25fb77fd561533b3d Mon Sep 17 00:00:00 2001 From: lijianguo Date: Fri, 27 Mar 2026 10:00:25 +0800 Subject: [PATCH 93/93] chore(adddons): update kafka appversion redis probe timeout --- addons/kafka/4.2/chart/kafka-4.2/Chart.yaml | 2 +- addons/redis/7.0/chart/redis/values.yaml | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/addons/kafka/4.2/chart/kafka-4.2/Chart.yaml b/addons/kafka/4.2/chart/kafka-4.2/Chart.yaml index 4ab73fdc..e4a7afa3 100644 --- a/addons/kafka/4.2/chart/kafka-4.2/Chart.yaml +++ b/addons/kafka/4.2/chart/kafka-4.2/Chart.yaml @@ -5,7 +5,7 @@ annotations: category: 
Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 4.2 +appVersion: 4.2.0 dependencies: - name: common repository: oci://registry.drycc.cc/charts diff --git a/addons/redis/7.0/chart/redis/values.yaml b/addons/redis/7.0/chart/redis/values.yaml index 6dcbef97..4c93a2ed 100644 --- a/addons/redis/7.0/chart/redis/values.yaml +++ b/addons/redis/7.0/chart/redis/values.yaml @@ -999,8 +999,8 @@ sentinel: startupProbe: enabled: true initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 successThreshold: 1 failureThreshold: 22 ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis™ Sentinel nodes @@ -1013,8 +1013,8 @@ sentinel: livenessProbe: enabled: true initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 successThreshold: 1 failureThreshold: 5 ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis™ Sentinel nodes @@ -1027,8 +1027,8 @@ sentinel: readinessProbe: enabled: true initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 successThreshold: 1 failureThreshold: 5 ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one