yawyd 1 year ago
commit
d5fddb15c7
100 changed files with 12235 additions and 0 deletions
  1. 21 0
      clickhouse/.helmignore
  2. 9 0
      clickhouse/Chart.lock
  3. 31 0
      clickhouse/Chart.yaml
  4. 512 0
      clickhouse/README.md
  5. 22 0
      clickhouse/charts/common/.helmignore
  6. 24 0
      clickhouse/charts/common/Chart.yaml
  7. 233 0
      clickhouse/charts/common/README.md
  8. 106 0
      clickhouse/charts/common/templates/_affinities.tpl
  9. 154 0
      clickhouse/charts/common/templates/_capabilities.tpl
  10. 23 0
      clickhouse/charts/common/templates/_errors.tpl
  11. 80 0
      clickhouse/charts/common/templates/_images.tpl
  12. 68 0
      clickhouse/charts/common/templates/_ingress.tpl
  13. 18 0
      clickhouse/charts/common/templates/_labels.tpl
  14. 66 0
      clickhouse/charts/common/templates/_names.tpl
  15. 165 0
      clickhouse/charts/common/templates/_secrets.tpl
  16. 23 0
      clickhouse/charts/common/templates/_storage.tpl
  17. 13 0
      clickhouse/charts/common/templates/_tplvalues.tpl
  18. 62 0
      clickhouse/charts/common/templates/_utils.tpl
  19. 14 0
      clickhouse/charts/common/templates/_warnings.tpl
  20. 72 0
      clickhouse/charts/common/templates/validations/_cassandra.tpl
  21. 103 0
      clickhouse/charts/common/templates/validations/_mariadb.tpl
  22. 108 0
      clickhouse/charts/common/templates/validations/_mongodb.tpl
  23. 103 0
      clickhouse/charts/common/templates/validations/_mysql.tpl
  24. 129 0
      clickhouse/charts/common/templates/validations/_postgresql.tpl
  25. 76 0
      clickhouse/charts/common/templates/validations/_redis.tpl
  26. 46 0
      clickhouse/charts/common/templates/validations/_validations.tpl
  27. 5 0
      clickhouse/charts/common/values.yaml
  28. 21 0
      clickhouse/charts/zookeeper/.helmignore
  29. 6 0
      clickhouse/charts/zookeeper/Chart.lock
  30. 25 0
      clickhouse/charts/zookeeper/Chart.yaml
  31. 524 0
      clickhouse/charts/zookeeper/README.md
  32. 22 0
      clickhouse/charts/zookeeper/charts/common/.helmignore
  33. 24 0
      clickhouse/charts/zookeeper/charts/common/Chart.yaml
  34. 233 0
      clickhouse/charts/zookeeper/charts/common/README.md
  35. 106 0
      clickhouse/charts/zookeeper/charts/common/templates/_affinities.tpl
  36. 154 0
      clickhouse/charts/zookeeper/charts/common/templates/_capabilities.tpl
  37. 23 0
      clickhouse/charts/zookeeper/charts/common/templates/_errors.tpl
  38. 80 0
      clickhouse/charts/zookeeper/charts/common/templates/_images.tpl
  39. 68 0
      clickhouse/charts/zookeeper/charts/common/templates/_ingress.tpl
  40. 18 0
      clickhouse/charts/zookeeper/charts/common/templates/_labels.tpl
  41. 66 0
      clickhouse/charts/zookeeper/charts/common/templates/_names.tpl
  42. 165 0
      clickhouse/charts/zookeeper/charts/common/templates/_secrets.tpl
  43. 23 0
      clickhouse/charts/zookeeper/charts/common/templates/_storage.tpl
  44. 13 0
      clickhouse/charts/zookeeper/charts/common/templates/_tplvalues.tpl
  45. 62 0
      clickhouse/charts/zookeeper/charts/common/templates/_utils.tpl
  46. 14 0
      clickhouse/charts/zookeeper/charts/common/templates/_warnings.tpl
  47. 72 0
      clickhouse/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl
  48. 103 0
      clickhouse/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl
  49. 108 0
      clickhouse/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl
  50. 103 0
      clickhouse/charts/zookeeper/charts/common/templates/validations/_mysql.tpl
  51. 129 0
      clickhouse/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl
  52. 76 0
      clickhouse/charts/zookeeper/charts/common/templates/validations/_redis.tpl
  53. 46 0
      clickhouse/charts/zookeeper/charts/common/templates/validations/_validations.tpl
  54. 5 0
      clickhouse/charts/zookeeper/charts/common/values.yaml
  55. 76 0
      clickhouse/charts/zookeeper/templates/NOTES.txt
  56. 361 0
      clickhouse/charts/zookeeper/templates/_helpers.tpl
  57. 17 0
      clickhouse/charts/zookeeper/templates/configmap.yaml
  58. 4 0
      clickhouse/charts/zookeeper/templates/extra-list.yaml
  59. 29 0
      clickhouse/charts/zookeeper/templates/metrics-svc.yaml
  60. 41 0
      clickhouse/charts/zookeeper/templates/networkpolicy.yaml
  61. 26 0
      clickhouse/charts/zookeeper/templates/pdb.yaml
  62. 27 0
      clickhouse/charts/zookeeper/templates/prometheusrule.yaml
  63. 102 0
      clickhouse/charts/zookeeper/templates/scripts-configmap.yaml
  64. 77 0
      clickhouse/charts/zookeeper/templates/secrets.yaml
  65. 21 0
      clickhouse/charts/zookeeper/templates/serviceaccount.yaml
  66. 53 0
      clickhouse/charts/zookeeper/templates/servicemonitor.yaml
  67. 532 0
      clickhouse/charts/zookeeper/templates/statefulset.yaml
  68. 42 0
      clickhouse/charts/zookeeper/templates/svc-headless.yaml
  69. 71 0
      clickhouse/charts/zookeeper/templates/svc.yaml
  70. 57 0
      clickhouse/charts/zookeeper/templates/tls-secrets.yaml
  71. 879 0
      clickhouse/charts/zookeeper/values.yaml
  72. 58 0
      clickhouse/templates/NOTES.txt
  73. 202 0
      clickhouse/templates/_helpers.tpl
  74. 18 0
      clickhouse/templates/configmap-extra.yaml
  75. 18 0
      clickhouse/templates/configmap.yaml
  76. 4 0
      clickhouse/templates/extra-list.yaml
  77. 45 0
      clickhouse/templates/ingress-tls-secrets.yaml
  78. 60 0
      clickhouse/templates/ingress.yaml
  79. 17 0
      clickhouse/templates/init-scripts-secret.yaml
  80. 32 0
      clickhouse/templates/scripts-configmap.yaml
  81. 18 0
      clickhouse/templates/secret.yaml
  82. 22 0
      clickhouse/templates/service-account.yaml
  83. 152 0
      clickhouse/templates/service-external-access.yaml
  84. 71 0
      clickhouse/templates/service-headless.yaml
  85. 154 0
      clickhouse/templates/service.yaml
  86. 49 0
      clickhouse/templates/servicemonitor.yaml
  87. 17 0
      clickhouse/templates/start-scripts-secret.yaml
  88. 414 0
      clickhouse/templates/statefulset.yaml
  89. 27 0
      clickhouse/templates/tls-secret.yaml
  90. 1070 0
      clickhouse/values.yaml
  91. 359 0
      deepflow/deepflow-ebpf-spring-demo.yaml
  92. 936 0
      deepflow/deepflow-otel-skywalking-demo.yaml
  93. 966 0
      deepflow/deepflow-otel-spring-demo.yaml
  94. 23 0
      deepflow/deepflow/.helmignore
  95. 15 0
      deepflow/deepflow/Chart.lock
  96. 33 0
      deepflow/deepflow/Chart.yaml
  97. 181 0
      deepflow/deepflow/README.md
  98. 23 0
      deepflow/deepflow/charts/clickhouse/.helmignore
  99. 6 0
      deepflow/deepflow/charts/clickhouse/Chart.yaml
  100. 255 0
      deepflow/deepflow/charts/clickhouse/templates/_affinity.tpl

+ 21 - 0
clickhouse/.helmignore

@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj

+ 9 - 0
clickhouse/Chart.lock

@@ -0,0 +1,9 @@
+dependencies:
+- name: zookeeper
+  repository: https://charts.bitnami.com/bitnami
+  version: 11.1.5
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 2.2.4
+digest: sha256:a9cc33255fae632899c931e89126a7a0e9cec72fa758d499dd75f1ab752d1b0e
+generated: "2023-04-01T10:33:18.34925286Z"

+ 31 - 0
clickhouse/Chart.yaml

@@ -0,0 +1,31 @@
+annotations:
+  category: Database
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 23.3.1
+dependencies:
+- condition: zookeeper.enabled
+  name: zookeeper
+  repository: https://charts.bitnami.com/bitnami
+  version: 11.x.x
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  tags:
+  - bitnami-common
+  version: 2.x.x
+description: ClickHouse is an open-source column-oriented OLAP database management
+  system. Use it to boost your database performance while providing linear scalability
+  and hardware efficiency.
+home: https://clickhouse.com/
+icon: https://bitnami.com/assets/stacks/clickhouse/img/clickhouse-stack-220x234.png
+keywords:
+- database
+- sharding
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: clickhouse
+sources:
+- https://github.com/bitnami/containers/tree/main/bitnami/clickhouse
+- https://github.com/ClickHouse/ClickHouse
+version: 3.1.5

+ 512 - 0
clickhouse/README.md

@@ -0,0 +1,512 @@
+<!--- app-name: ClickHouse -->
+
+# ClickHouse packaged by Bitnami
+
+ClickHouse is an open-source column-oriented OLAP database management system. Use it to boost your database performance while providing linear scalability and hardware efficiency.
+
+[Overview of ClickHouse](https://clickhouse.com/)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm repo add my-repo https://charts.bitnami.com/bitnami
+helm install my-release my-repo/clickhouse
+```
+
+## Introduction
+
+Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads.
+
+This chart bootstraps a [ClickHouse](https://github.com/clickhouse/clickhouse) Deployment in a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
+
+[Learn more about the default configuration of the chart](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/get-started/).
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+- ReadWriteMany volumes for deployment scaling
+
+> If you are using Kubernetes 1.18, the following code needs to be commented out.
+> seccompProfile:
+> type: "RuntimeDefault"
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm repo add my-repo https://charts.bitnami.com/bitnami
+helm install my-release my-repo/clickhouse
+```
+
+The command deploys ClickHouse on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name                      | Description                                     | Value |
+| ------------------------- | ----------------------------------------------- | ----- |
+| `global.imageRegistry`    | Global Docker image registry                    | `""`  |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]`  |
+| `global.storageClass`     | Global StorageClass for Persistent Volume(s)    | `""`  |
+
+### Common parameters
+
+| Name                     | Description                                                                             | Value           |
+| ------------------------ | --------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion`            | Override Kubernetes version                                                             | `""`            |
+| `nameOverride`           | String to partially override common.names.name                                          | `""`            |
+| `fullnameOverride`       | String to fully override common.names.fullname                                          | `""`            |
+| `namespaceOverride`      | String to fully override common.names.namespace                                         | `""`            |
+| `commonLabels`           | Labels to add to all deployed objects                                                   | `{}`            |
+| `commonAnnotations`      | Annotations to add to all deployed objects                                              | `{}`            |
+| `clusterDomain`          | Kubernetes cluster domain name                                                          | `cluster.local` |
+| `extraDeploy`            | Array of extra objects to deploy with the release                                       | `[]`            |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false`         |
+| `diagnosticMode.command` | Command to override all containers in the deployment                                    | `["sleep"]`     |
+| `diagnosticMode.args`    | Args to override all containers in the deployment                                       | `["infinity"]`  |
+
+### ClickHouse Parameters
+
+| Name                                                | Description                                                                                                | Value                 |
+| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------- |
+| `image.registry`                                    | ClickHouse image registry                                                                                  | `docker.io`           |
+| `image.repository`                                  | ClickHouse image repository                                                                                | `bitnami/clickhouse`  |
+| `image.tag`                                         | ClickHouse image tag (immutable tags are recommended)                                                      | `23.3.1-debian-11-r0` |
+| `image.digest`                                      | ClickHouse image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""`                  |
+| `image.pullPolicy`                                  | ClickHouse image pull policy                                                                               | `IfNotPresent`        |
+| `image.pullSecrets`                                 | ClickHouse image pull secrets                                                                              | `[]`                  |
+| `image.debug`                                       | Enable ClickHouse image debug mode                                                                         | `false`               |
+| `shards`                                            | Number of ClickHouse shards to deploy                                                                      | `2`                   |
+| `replicaCount`                                      | Number of ClickHouse replicas per shard to deploy                                                          | `3`                   |
+| `containerPorts.http`                               | ClickHouse HTTP container port                                                                             | `8123`                |
+| `containerPorts.https`                              | ClickHouse HTTPS container port                                                                            | `8443`                |
+| `containerPorts.tcp`                                | ClickHouse TCP container port                                                                              | `9000`                |
+| `containerPorts.tcpSecure`                          | ClickHouse TCP (secure) container port                                                                     | `9440`                |
+| `containerPorts.keeper`                             | ClickHouse keeper TCP container port                                                                       | `2181`                |
+| `containerPorts.keeperSecure`                       | ClickHouse keeper TCP (secure) container port                                                              | `3181`                |
+| `containerPorts.keeperInter`                        | ClickHouse keeper interserver TCP container port                                                           | `9444`                |
+| `containerPorts.mysql`                              | ClickHouse MySQL container port                                                                            | `9004`                |
+| `containerPorts.postgresql`                         | ClickHouse PostgreSQL container port                                                                       | `9005`                |
+| `containerPorts.interserver`                        | ClickHouse Interserver container port                                                                      | `9009`                |
+| `containerPorts.metrics`                            | ClickHouse metrics container port                                                                          | `8001`                |
+| `livenessProbe.enabled`                             | Enable livenessProbe on ClickHouse containers                                                              | `true`                |
+| `livenessProbe.initialDelaySeconds`                 | Initial delay seconds for livenessProbe                                                                    | `10`                  |
+| `livenessProbe.periodSeconds`                       | Period seconds for livenessProbe                                                                           | `10`                  |
+| `livenessProbe.timeoutSeconds`                      | Timeout seconds for livenessProbe                                                                          | `1`                   |
+| `livenessProbe.failureThreshold`                    | Failure threshold for livenessProbe                                                                        | `3`                   |
+| `livenessProbe.successThreshold`                    | Success threshold for livenessProbe                                                                        | `1`                   |
+| `readinessProbe.enabled`                            | Enable readinessProbe on ClickHouse containers                                                             | `true`                |
+| `readinessProbe.initialDelaySeconds`                | Initial delay seconds for readinessProbe                                                                   | `10`                  |
+| `readinessProbe.periodSeconds`                      | Period seconds for readinessProbe                                                                          | `10`                  |
+| `readinessProbe.timeoutSeconds`                     | Timeout seconds for readinessProbe                                                                         | `1`                   |
+| `readinessProbe.failureThreshold`                   | Failure threshold for readinessProbe                                                                       | `3`                   |
+| `readinessProbe.successThreshold`                   | Success threshold for readinessProbe                                                                       | `1`                   |
+| `startupProbe.enabled`                              | Enable startupProbe on ClickHouse containers                                                               | `false`               |
+| `startupProbe.initialDelaySeconds`                  | Initial delay seconds for startupProbe                                                                     | `10`                  |
+| `startupProbe.periodSeconds`                        | Period seconds for startupProbe                                                                            | `10`                  |
+| `startupProbe.timeoutSeconds`                       | Timeout seconds for startupProbe                                                                           | `1`                   |
+| `startupProbe.failureThreshold`                     | Failure threshold for startupProbe                                                                         | `3`                   |
+| `startupProbe.successThreshold`                     | Success threshold for startupProbe                                                                         | `1`                   |
+| `customLivenessProbe`                               | Custom livenessProbe that overrides the default one                                                        | `{}`                  |
+| `customReadinessProbe`                              | Custom readinessProbe that overrides the default one                                                       | `{}`                  |
+| `customStartupProbe`                                | Custom startupProbe that overrides the default one                                                         | `{}`                  |
+| `resources.limits`                                  | The resources limits for the ClickHouse containers                                                         | `{}`                  |
+| `resources.requests`                                | The requested resources for the ClickHouse containers                                                      | `{}`                  |
+| `podSecurityContext.enabled`                        | Enabled ClickHouse pods' Security Context                                                                  | `true`                |
+| `podSecurityContext.fsGroup`                        | Set ClickHouse pod's Security Context fsGroup                                                              | `1001`                |
+| `podSecurityContext.seccompProfile.type`            | Set ClickHouse container's Security Context seccomp profile                                                | `RuntimeDefault`      |
+| `containerSecurityContext.enabled`                  | Enabled ClickHouse containers' Security Context                                                            | `true`                |
+| `containerSecurityContext.runAsUser`                | Set ClickHouse containers' Security Context runAsUser                                                      | `1001`                |
+| `containerSecurityContext.runAsNonRoot`             | Set ClickHouse containers' Security Context runAsNonRoot                                                   | `true`                |
+| `containerSecurityContext.allowPrivilegeEscalation` | Set ClickHouse container's privilege escalation                                                            | `false`               |
+| `containerSecurityContext.capabilities.drop`        | List of capabilities to be dropped in ClickHouse containers' Security Context                              | `["ALL"]`             |
+| `auth.username`                                     | ClickHouse Admin username                                                                                  | `default`             |
+| `auth.password`                                     | ClickHouse Admin password                                                                                  | `""`                  |
+| `auth.existingSecret`                               | Name of a secret containing the Admin password                                                             | `""`                  |
+| `auth.existingSecretKey`                            | Name of the key inside the existing secret                                                                 | `""`                  |
+| `logLevel`                                          | Logging level                                                                                              | `information`         |
+
+### ClickHouse keeper configuration parameters
+
+| Name                            | Description                                                                                                              | Value                   |
+| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ----------------------- |
+| `keeper.enabled`                | Deploy ClickHouse keeper. Support is experimental.                                                                       | `false`                 |
+| `defaultConfigurationOverrides` | Default configuration overrides (evaluated as a template)                                                                | `""`                    |
+| `existingOverridesConfigmap`    | The name of an existing ConfigMap with your custom configuration for ClickHouse                                          | `""`                    |
+| `extraOverrides`                | Extra configuration overrides (evaluated as a template) apart from the default                                           | `""`                    |
+| `extraOverridesConfigmap`       | The name of an existing ConfigMap with extra configuration for ClickHouse                                                | `""`                    |
+| `extraOverridesSecret`          | The name of an existing Secret with your custom configuration for ClickHouse                                             | `""`                    |
+| `initdbScripts`                 | Dictionary of initdb scripts                                                                                             | `{}`                    |
+| `initdbScriptsSecret`           | Secret with the initdb scripts (Note: Overrides `initdbScripts`)                                                         | `""`                    |
+| `startdbScripts`                | Dictionary of startdb scripts                                                                                            | `{}`                    |
+| `startdbScriptsSecret`          | Secret with the startdb scripts (Note: Overrides `startdbScripts`)                                                       | `""`                    |
+| `command`                       | Override default container command (useful when using custom images)                                                     | `["/scripts/setup.sh"]` |
+| `args`                          | Override default container args (useful when using custom images)                                                        | `[]`                    |
+| `hostAliases`                   | ClickHouse pods host aliases                                                                                             | `[]`                    |
+| `podLabels`                     | Extra labels for ClickHouse pods                                                                                         | `{}`                    |
+| `podAnnotations`                | Annotations for ClickHouse pods                                                                                          | `{}`                    |
+| `podAffinityPreset`             | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                      | `""`                    |
+| `podAntiAffinityPreset`         | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                 | `soft`                  |
+| `nodeAffinityPreset.type`       | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                | `""`                    |
+| `nodeAffinityPreset.key`        | Node label key to match. Ignored if `affinity` is set                                                                    | `""`                    |
+| `nodeAffinityPreset.values`     | Node label values to match. Ignored if `affinity` is set                                                                 | `[]`                    |
+| `affinity`                      | Affinity for ClickHouse pods assignment                                                                                  | `{}`                    |
+| `nodeSelector`                  | Node labels for ClickHouse pods assignment                                                                               | `{}`                    |
+| `tolerations`                   | Tolerations for ClickHouse pods assignment                                                                               | `[]`                    |
+| `updateStrategy.type`           | ClickHouse statefulset strategy type                                                                                     | `RollingUpdate`         |
+| `podManagementPolicy`           | Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join                       | `Parallel`              |
+| `priorityClassName`             | ClickHouse pods' priorityClassName                                                                                       | `""`                    |
+| `topologySpreadConstraints`     | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]`                    |
+| `schedulerName`                 | Name of the k8s scheduler (other than default) for ClickHouse pods                                                       | `""`                    |
+| `terminationGracePeriodSeconds` | Seconds ClickHouse pod needs to terminate gracefully                                                                     | `""`                    |
+| `lifecycleHooks`                | for the ClickHouse container(s) to automate configuration before or after startup                                        | `{}`                    |
+| `extraEnvVars`                  | Array with extra environment variables to add to ClickHouse nodes                                                        | `[]`                    |
+| `extraEnvVarsCM`                | Name of existing ConfigMap containing extra env vars for ClickHouse nodes                                                | `""`                    |
+| `extraEnvVarsSecret`            | Name of existing Secret containing extra env vars for ClickHouse nodes                                                   | `""`                    |
+| `extraVolumes`                  | Optionally specify extra list of additional volumes for the ClickHouse pod(s)                                            | `[]`                    |
+| `extraVolumeMounts`             | Optionally specify extra list of additional volumeMounts for the ClickHouse container(s)                                 | `[]`                    |
+| `sidecars`                      | Add additional sidecar containers to the ClickHouse pod(s)                                                               | `[]`                    |
+| `initContainers`                | Add additional init containers to the ClickHouse pod(s)                                                                  | `[]`                    |
+| `tls.enabled`                   | Enable TLS traffic support                                                                                               | `false`                 |
+| `tls.autoGenerated`             | Generate automatically self-signed TLS certificates                                                                      | `false`                 |
+| `tls.certificatesSecret`        | Name of an existing secret that contains the certificates                                                                | `""`                    |
+| `tls.certFilename`              | Certificate filename                                                                                                     | `""`                    |
+| `tls.certKeyFilename`           | Certificate key filename                                                                                                 | `""`                    |
+| `tls.certCAFilename`            | CA Certificate filename                                                                                                  | `""`                    |
+
+### Traffic Exposure Parameters
+
+| Name                                              | Description                                                                                                                      | Value                    |
+| ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
+| `service.type`                                    | ClickHouse service type                                                                                                          | `ClusterIP`              |
+| `service.ports.http`                              | ClickHouse service HTTP port                                                                                                     | `8123`                   |
+| `service.ports.https`                             | ClickHouse service HTTPS port                                                                                                    | `443`                    |
+| `service.ports.tcp`                               | ClickHouse service TCP port                                                                                                      | `9000`                   |
+| `service.ports.tcpSecure`                         | ClickHouse service TCP (secure) port                                                                                             | `9440`                   |
+| `service.ports.keeper`                            | ClickHouse keeper TCP container port                                                                                             | `2181`                   |
+| `service.ports.keeperSecure`                      | ClickHouse keeper TCP (secure) container port                                                                                    | `3181`                   |
+| `service.ports.keeperInter`                       | ClickHouse keeper interserver TCP container port                                                                                 | `9444`                   |
+| `service.ports.mysql`                             | ClickHouse service MySQL port                                                                                                    | `9004`                   |
+| `service.ports.postgresql`                        | ClickHouse service PostgreSQL port                                                                                               | `9005`                   |
+| `service.ports.interserver`                       | ClickHouse service Interserver port                                                                                              | `9009`                   |
+| `service.ports.metrics`                           | ClickHouse service metrics port                                                                                                  | `8001`                   |
+| `service.nodePorts.http`                          | Node port for HTTP                                                                                                               | `""`                     |
+| `service.nodePorts.https`                         | Node port for HTTPS                                                                                                              | `""`                     |
+| `service.nodePorts.tcp`                           | Node port for TCP                                                                                                                | `""`                     |
+| `service.nodePorts.tcpSecure`                     | Node port for TCP (with TLS)                                                                                                     | `""`                     |
+| `service.nodePorts.keeper`                        | ClickHouse keeper TCP container port                                                                                             | `""`                     |
+| `service.nodePorts.keeperSecure`                  | ClickHouse keeper TCP (secure) container port                                                                                    | `""`                     |
+| `service.nodePorts.keeperInter`                   | ClickHouse keeper interserver TCP container port                                                                                 | `""`                     |
+| `service.nodePorts.mysql`                         | Node port for MySQL                                                                                                              | `""`                     |
+| `service.nodePorts.postgresql`                    | Node port for PostgreSQL                                                                                                         | `""`                     |
+| `service.nodePorts.interserver`                   | Node port for Interserver                                                                                                        | `""`                     |
+| `service.nodePorts.metrics`                       | Node port for metrics                                                                                                            | `""`                     |
+| `service.clusterIP`                               | ClickHouse service Cluster IP                                                                                                    | `""`                     |
+| `service.loadBalancerIP`                          | ClickHouse service Load Balancer IP                                                                                              | `""`                     |
+| `service.loadBalancerSourceRanges`                | ClickHouse service Load Balancer sources                                                                                         | `[]`                     |
+| `service.externalTrafficPolicy`                   | ClickHouse service external traffic policy                                                                                       | `Cluster`                |
+| `service.annotations`                             | Additional custom annotations for ClickHouse service                                                                             | `{}`                     |
+| `service.extraPorts`                              | Extra ports to expose in ClickHouse service (normally used with the `sidecars` value)                                            | `[]`                     |
+| `service.sessionAffinity`                         | Control where client requests go, to the same pod or round-robin                                                                 | `None`                   |
+| `service.sessionAffinityConfig`                   | Additional settings for the sessionAffinity                                                                                      | `{}`                     |
+| `service.headless.annotations`                    | Annotations for the headless service.                                                                                            | `{}`                     |
+| `externalAccess.enabled`                          | Enable Kubernetes external cluster access to ClickHouse                                                                          | `false`                  |
+| `externalAccess.service.type`                     | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP                                       | `LoadBalancer`           |
+| `externalAccess.service.ports.http`               | ClickHouse service HTTP port                                                                                                     | `80`                     |
+| `externalAccess.service.ports.https`              | ClickHouse service HTTPS port                                                                                                    | `443`                    |
+| `externalAccess.service.ports.tcp`                | ClickHouse service TCP port                                                                                                      | `9000`                   |
+| `externalAccess.service.ports.tcpSecure`          | ClickHouse service TCP (secure) port                                                                                             | `9440`                   |
+| `externalAccess.service.ports.keeper`             | ClickHouse keeper TCP container port                                                                                             | `2181`                   |
+| `externalAccess.service.ports.keeperSecure`       | ClickHouse keeper TCP (secure) container port                                                                                    | `3181`                   |
+| `externalAccess.service.ports.keeperInter`        | ClickHouse keeper interserver TCP container port                                                                                 | `9444`                   |
+| `externalAccess.service.ports.mysql`              | ClickHouse service MySQL port                                                                                                    | `9004`                   |
+| `externalAccess.service.ports.postgresql`         | ClickHouse service PostgreSQL port                                                                                               | `9005`                   |
+| `externalAccess.service.ports.interserver`        | ClickHouse service Interserver port                                                                                              | `9009`                   |
+| `externalAccess.service.ports.metrics`            | ClickHouse service metrics port                                                                                                  | `8001`                   |
+| `externalAccess.service.loadBalancerIPs`          | Array of load balancer IPs for each ClickHouse node. Length must be the same as replicaCount                                     | `[]`                     |
+| `externalAccess.service.loadBalancerAnnotations`  | Array of load balancer annotations for each ClickHouse node. Length must be the same as replicaCount                             | `[]`                     |
+| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer                                                                        | `[]`                     |
+| `externalAccess.service.nodePorts.http`           | Node port for HTTP                                                                                                               | `[]`                     |
+| `externalAccess.service.nodePorts.https`          | Node port for HTTPS                                                                                                              | `[]`                     |
+| `externalAccess.service.nodePorts.tcp`            | Node port for TCP                                                                                                                | `[]`                     |
+| `externalAccess.service.nodePorts.tcpSecure`      | Node port for TCP (with TLS)                                                                                                     | `[]`                     |
+| `externalAccess.service.nodePorts.keeper`         | ClickHouse keeper TCP container port                                                                                             | `[]`                     |
+| `externalAccess.service.nodePorts.keeperSecure`   | ClickHouse keeper TCP container port (with TLS)                                                                                  | `[]`                     |
+| `externalAccess.service.nodePorts.keeperInter`    | ClickHouse keeper interserver TCP container port                                                                                 | `[]`                     |
+| `externalAccess.service.nodePorts.mysql`          | Node port for MySQL                                                                                                              | `[]`                     |
+| `externalAccess.service.nodePorts.postgresql`     | Node port for PostgreSQL                                                                                                         | `[]`                     |
+| `externalAccess.service.nodePorts.interserver`    | Node port for Interserver                                                                                                        | `[]`                     |
+| `externalAccess.service.nodePorts.metrics`        | Node port for metrics                                                                                                            | `[]`                     |
+| `externalAccess.service.labels`                   | Service labels for external access                                                                                               | `{}`                     |
+| `externalAccess.service.annotations`              | Service annotations for external access                                                                                          | `{}`                     |
+| `externalAccess.service.extraPorts`               | Extra ports to expose in the ClickHouse external service                                                                         | `[]`                     |
+| `ingress.enabled`                                 | Enable ingress record generation for ClickHouse                                                                                  | `false`                  |
+| `ingress.pathType`                                | Ingress path type                                                                                                                | `ImplementationSpecific` |
+| `ingress.apiVersion`                              | Force Ingress API version (automatically detected if not set)                                                                    | `""`                     |
+| `ingress.hostname`                                | Default host for the ingress record                                                                                              | `clickhouse.local`       |
+| `ingress.ingressClassName`                        | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)                                                       | `""`                     |
+| `ingress.path`                                    | Default path for the ingress record                                                                                              | `/`                      |
+| `ingress.annotations`                             | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}`                     |
+| `ingress.tls`                                     | Enable TLS configuration for the host defined at `ingress.hostname` parameter                                                    | `false`                  |
+| `ingress.selfSigned`                              | Create a TLS secret for this ingress record using self-signed certificates generated by Helm                                     | `false`                  |
+| `ingress.extraHosts`                              | An array with additional hostname(s) to be covered with the ingress record                                                       | `[]`                     |
+| `ingress.extraPaths`                              | An array with additional arbitrary paths that may need to be added to the ingress under the main host                            | `[]`                     |
+| `ingress.extraTls`                                | TLS configuration for additional hostname(s) to be covered with this ingress record                                              | `[]`                     |
+| `ingress.secrets`                                 | Custom TLS certificates as secrets                                                                                               | `[]`                     |
+| `ingress.extraRules`                              | Additional rules to be covered with this ingress record                                                                          | `[]`                     |
+
+### Persistence Parameters
+
+| Name                       | Description                                                            | Value               |
+| -------------------------- | ---------------------------------------------------------------------- | ------------------- |
+| `persistence.enabled`      | Enable persistence using Persistent Volume Claims                      | `true`              |
+| `persistence.storageClass` | Storage class of backing PVC                                           | `""`                |
+| `persistence.labels`       | Persistent Volume Claim labels                                         | `{}`                |
+| `persistence.annotations`  | Persistent Volume Claim annotations                                    | `{}`                |
+| `persistence.accessModes`  | Persistent Volume Access Modes                                         | `["ReadWriteOnce"]` |
+| `persistence.size`         | Size of data volume                                                    | `8Gi`               |
+| `persistence.selector`     | Selector to match an existing Persistent Volume for WordPress data PVC | `{}`                |
+| `persistence.dataSource`   | Custom PVC data source                                                 | `{}`                |
+
+### Init Container Parameters
+
+| Name                                                   | Description                                                                                     | Value                   |
+| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled`                            | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false`                 |
+| `volumePermissions.image.registry`                     | Bitnami Shell image registry                                                                    | `docker.io`             |
+| `volumePermissions.image.repository`                   | Bitnami Shell image repository                                                                  | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag`                          | Bitnami Shell image tag (immutable tags are recommended)                                        | `11-debian-11-r101`     |
+| `volumePermissions.image.pullPolicy`                   | Bitnami Shell image pull policy                                                                 | `IfNotPresent`          |
+| `volumePermissions.image.pullSecrets`                  | Bitnami Shell image pull secrets                                                                | `[]`                    |
+| `volumePermissions.resources.limits`                   | The resources limits for the init container                                                     | `{}`                    |
+| `volumePermissions.resources.requests`                 | The requested resources for the init container                                                  | `{}`                    |
+| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser                                                 | `0`                     |
+
+### Other Parameters
+
+| Name                                          | Description                                                                                            | Value   |
+| --------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- |
+| `serviceAccount.create`                       | Specifies whether a ServiceAccount should be created                                                   | `true`  |
+| `serviceAccount.name`                         | The name of the ServiceAccount to use.                                                                 | `""`    |
+| `serviceAccount.annotations`                  | Additional Service Account annotations (evaluated as a template)                                       | `{}`    |
+| `serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account                                         | `true`  |
+| `metrics.enabled`                             | Enable the export of Prometheus metrics                                                                | `false` |
+| `metrics.podAnnotations`                      | Annotations for metrics scraping                                                                       | `{}`    |
+| `metrics.serviceMonitor.enabled`              | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` |
+| `metrics.serviceMonitor.namespace`            | Namespace in which Prometheus is running                                                               | `""`    |
+| `metrics.serviceMonitor.annotations`          | Additional custom annotations for the ServiceMonitor                                                   | `{}`    |
+| `metrics.serviceMonitor.labels`               | Extra labels for the ServiceMonitor                                                                    | `{}`    |
+| `metrics.serviceMonitor.jobLabel`             | The name of the label on the target service to use as the job name in Prometheus                       | `""`    |
+| `metrics.serviceMonitor.honorLabels`          | honorLabels chooses the metric's labels on collisions with target labels                               | `false` |
+| `metrics.serviceMonitor.interval`             | Interval at which metrics should be scraped.                                                           | `""`    |
+| `metrics.serviceMonitor.scrapeTimeout`        | Timeout after which the scrape is ended                                                                | `""`    |
+| `metrics.serviceMonitor.metricRelabelings`    | Specify additional relabeling of metrics                                                               | `[]`    |
+| `metrics.serviceMonitor.relabelings`          | Specify general relabeling                                                                             | `[]`    |
+| `metrics.serviceMonitor.selector`             | Prometheus instance selector labels                                                                    | `{}`    |
+
+### External Zookeeper parameters
+
+| Name                        | Description                               | Value  |
+| --------------------------- | ----------------------------------------- | ------ |
+| `externalZookeeper.servers` | List of external zookeeper servers to use | `[]`   |
+| `externalZookeeper.port`    | Port of the Zookeeper servers             | `2888` |
+
+### Zookeeper subchart parameters
+
+| Name                             | Description                   | Value  |
+| -------------------------------- | ----------------------------- | ------ |
+| `zookeeper.enabled`              | Deploy Zookeeper subchart     | `true` |
+| `zookeeper.replicaCount`         | Number of Zookeeper instances | `3`    |
+| `zookeeper.service.ports.client` | Zookeeper client port         | `2181` |
+
+See <https://github.com/bitnami-labs/readme-generator-for-helm> to create the table.
+
+The above parameters map to the env variables defined in [bitnami/clickhouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse). For more information please refer to the [bitnami/clickhouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse) image documentation.
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release \
+  --set auth.username=admin \
+  --set auth.password=password \
+    my-repo/clickhouse
+```
+
+The above command sets the ClickHouse administrator account username and password to `admin` and `password` respectively.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml my-repo/clickhouse
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### ClickHouse keeper support
+
+You can set `keeper.enabled` to use ClickHouse keeper. If `keeper.enabled=true`, Zookeeper settings will be ignored.
+
+### External Zookeeper support
+
+You may want to have ClickHouse connect to an external zookeeper rather than installing one inside your cluster. Typical reasons for this are to use a managed database service, or to share a common database server for all your applications. To achieve this, the chart allows you to specify credentials for an external database with the [`externalZookeeper` parameter](#parameters). You should also disable the Zookeeper installation with the `zookeeper.enabled` option. Here is an example:
+
+```console
+zookeeper.enabled=false
+externalZookeeper.host=myexternalhost
+externalZookeeper.user=myuser
+externalZookeeper.password=mypassword
+externalZookeeper.database=mydatabase
+externalZookeeper.port=3306
+```
+
+### TLS secrets
+
+The chart also facilitates the creation of TLS secrets for use with the Ingress controller, with different options for certificate management. [Learn more about TLS secrets](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/administration/enable-tls-ingress/).
+
+## Persistence
+
+The [Bitnami ClickHouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse) image stores the ClickHouse data and configurations at the `/bitnami` path of the container. Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+
+### Additional environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+clickhouse:
+  extraEnvVars:
+    - name: LOG_LEVEL
+      value: error
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
+
+### Sidecars
+
+If additional containers are needed in the same pod as ClickHouse (such as additional metrics or logging exporters), they can be defined using the `sidecars` parameter. If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter. [Learn more about configuring and using sidecar containers](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/configuration/configure-sidecar-init-containers/).
+
+### Ingress without TLS
+
+For using ingress (example without TLS):
+
+```yaml
+ingress:
+  ## If true, ClickHouse server Ingress will be created
+  ##
+  enabled: true
+
+  ## ClickHouse server Ingress annotations
+  ##
+  annotations: {}
+  #   kubernetes.io/ingress.class: nginx
+  #   kubernetes.io/tls-acme: 'true'
+
+  ## ClickHouse server Ingress hostnames
+  ## Must be provided if Ingress is enabled
+  ##
+  hosts:
+    - clickhouse.domain.com
+```
+
+### Ingress TLS
+
+If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism.
+
+To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret (named `clickhouse-server-tls` in this example) in the namespace. Include the secret's name, along with the desired hostnames, in the Ingress TLS section of your custom `values.yaml` file:
+
+```yaml
+ingress:
+  ## If true, ClickHouse server Ingress will be created
+  ##
+  enabled: true
+
+  ## ClickHouse server Ingress annotations
+  ##
+  annotations: {}
+  #   kubernetes.io/ingress.class: nginx
+  #   kubernetes.io/tls-acme: 'true'
+
+  ## ClickHouse server Ingress hostnames
+  ## Must be provided if Ingress is enabled
+  ##
+  hosts:
+    - clickhouse.domain.com
+
+  ## ClickHouse server Ingress TLS configuration
+  ## Secrets must be manually created in the namespace
+  ##
+  tls:
+    - secretName: clickhouse-server-tls
+      hosts:
+        - clickhouse.domain.com
+```
+
+### Using custom scripts
+
+For advanced operations, the Bitnami ClickHouse chart allows using custom init and start scripts that will be mounted in `/docker-entrypoint.initdb.d` and `/docker-entrypoint.startdb.d`. The `init` scripts will be run on the first boot whereas the `start` scripts will be run on every container start. For adding the scripts directly as values use the `initdbScripts` and `startdbScripts` values. For using Secrets use the `initdbScriptsSecret` and `startdbScriptsSecret`.
+
+```yaml
+initdbScriptsSecret: init-scripts-secret
+startdbScriptsSecret: start-scripts-secret
+```
+
+### Pod affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 2.0.0
+
+This major updates the Zookeeper subchart to its newest major, 11.0.0. For more information on this subchart's major version, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100).
+
+## License
+
+Copyright &copy; 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 22 - 0
clickhouse/charts/common/.helmignore

@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 24 - 0
clickhouse/charts/common/Chart.yaml

@@ -0,0 +1,24 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.2.4
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/main/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 2.2.4

+ 233 - 0
clickhouse/charts/common/README.md

@@ -0,0 +1,233 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright &copy; 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 106 - 0
clickhouse/charts/common/templates/_affinities.tpl

@@ -0,0 +1,106 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" . -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - podAffinityTerm:
+      labelSelector:
+        matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+          {{- if not (empty $component) }}
+          {{ printf "app.kubernetes.io/component: %s" $component }}
+          {{- end }}
+          {{- range $key, $value := $extraMatchLabels }}
+          {{ $key }}: {{ $value | quote }}
+          {{- end }}
+      topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  - labelSelector:
+      matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+        {{- if not (empty $component) }}
+        {{ printf "app.kubernetes.io/component: %s" $component }}
+        {{- end }}
+        {{- range $key, $value := $extraMatchLabels }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+    topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.pods.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.pods.hard" . -}}
+  {{- end -}}
+{{- end -}}

+ 154 - 0
clickhouse/charts/common/templates/_capabilities.tpl

@@ -0,0 +1,154 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+    {{- if .Values.global.kubeVersion }}
+    {{- .Values.global.kubeVersion -}}
+    {{- else }}
+    {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+    {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}"  structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}

+ 23 - 0
clickhouse/charts/common/templates/_errors.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+    {{- $errorString = print $errorString "\n                 Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+    {{- $errorString = print $errorString "\n%s" -}}
+    {{- printf $errorString $validationErrors | fail -}}
+  {{- end -}}
+{{- end -}}

+ 80 - 0
clickhouse/charts/common/templates/_images.tpl

@@ -0,0 +1,80 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+    {{- if .global.imageRegistry }}
+     {{- $registryName = .global.imageRegistry -}}
+    {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+    {{- $separator = "@" -}}
+    {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+    {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+    {{- printf "%s%s%s"  $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+  {{- $pullSecrets := list }}
+
+  {{- if .global }}
+    {{- range .global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+  {{- $pullSecrets := list }}
+  {{- $context := .context }}
+
+  {{- if $context.Values.global }}
+    {{- range $context.Values.global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}

+ 68 - 0
clickhouse/charts/common/templates/_ingress.tpl

@@ -0,0 +1,68 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+  - serviceName - String. Name of an existing service backend
+  - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+  name: {{ .serviceName }}
+  port:
+    {{- if typeIs "string" .servicePort }}
+    name: {{ .servicePort }}
+    {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+    number: {{ .servicePort | int }}
+    {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}

+ 18 - 0
clickhouse/charts/common/templates/_labels.tpl

@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}

+ 66 - 0
clickhouse/charts/common/templates/_names.tpl

@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}

+ 165 - 0
clickhouse/charts/common/templates/_secrets.tpl

@@ -0,0 +1,165 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+  {{- if not (typeIs "string" .existingSecret) -}}
+    {{- if .existingSecret.keyMapping -}}
+      {{- $key = index .existingSecret.keyMapping $.key -}}
+    {{- end -}}
+  {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+  - length - int - Optional - Length of the generated random password.
+  - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+  - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+  - context - Context - Required - Parent context.
+
+The order in which this function returns a secret password:
+  1. Already existing 'Secret' resource
+     (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+  2. Password provided via the values.yaml
+     (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+  3. Randomly generated secret password
+     (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key | quote }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+  - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+  {{- $value = index $secretData .key -}}
+{{- else -}}
+  {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}

+ 23 - 0
clickhouse/charts/common/templates/_storage.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper storageClassName line. "-" yields storageClassName: "" (disables dynamic provisioning); .global.storageClass, when set, takes precedence over .persistence.storageClass.
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+    {{- if .global.storageClass -}}
+        {{- $storageClass = .global.storageClass -}}
+    {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+      {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+      {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}

+ 13 - 0
clickhouse/charts/common/templates/_tplvalues.tpl

@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template: string values are passed to tpl as-is; non-string values (maps, lists) are serialized to YAML first, then rendered.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}

+ 62 - 0
clickhouse/charts/common/templates/_utils.tpl

@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build an env var name given a field: splits the field on "-" and joins the upper-cased parts with "_" (e.g. "my-password" -> MY_PASSWORD).
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+  {{- $fieldNameSplit := splitList "-" .field -}}
+  {{- $upperCaseFieldNameSplit := list -}}
+
+  {{- range $fieldNameSplit -}}
+    {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+  {{- end -}}
+
+  {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given a dot-separated key path; fails template rendering if an intermediate key in the path does not exist. Returns "" when the final value is unset.
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+  {{- if not $latestObj -}}
+    {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+  {{- end -}}
+  {{- $value = ( index $latestObj . ) -}}
+  {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}} 
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+  {{- if $value -}}
+    {{- $key = . }}
+  {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}} 
+{{- end -}}

+ 14 - 0
clickhouse/charts/common/templates/_warnings.tpl

@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}

+ 72 - 0
clickhouse/charts/common/templates/validations/_cassandra.tpl

@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+  {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+  {{- $enabled := include "common.cassandra.values.enabled" . -}}
+  {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+  {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.dbUser.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.cassandra.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+  {{- if .subchart -}}
+    cassandra.dbUser
+  {{- else -}}
+    dbUser
+  {{- end -}}
+{{- end -}}

+ 103 - 0
clickhouse/charts/common/templates/validations/_mariadb.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+  {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mariadb.values.enabled" . -}}
+  {{- $architecture := include "common.mariadb.values.architecture" . -}}
+  {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mariadb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mariadb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}

+ 108 - 0
clickhouse/charts/common/templates/validations/_mongodb.tpl

@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MongoDB&reg; values are stored, e.g: "mongodb-passwords-secret"
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+  {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mongodb.values.enabled" . -}}
+  {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+  {{- $architecture := include "common.mongodb.values.architecture" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+  {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+  {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+    {{- if and $valueUsername $valueDatabase -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replicaset") -}}
+        {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}

+ 103 - 0
clickhouse/charts/common/templates/validations/_mysql.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+  {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mysql.values.enabled" . -}}
+  {{- $architecture := include "common.mysql.values.architecture" . -}}
+  {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mysql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+  {{- if .subchart -}}
+    mysql.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}

+ 129 - 0
clickhouse/charts/common/templates/validations/_postgresql.tpl

@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+        {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}

+ 76 - 0
clickhouse/charts/common/templates/validations/_redis.tpl

@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values ("redis." when used as a subchart, empty otherwise)
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14), by testing whether an "auth" block exists under the redis values prefix. Outputs "true" when it does, nothing otherwise.
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}

+ 46 - 0
clickhouse/charts/common/templates/validations/_validations.tpl

@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty (delegates each entry to "common.validations.values.single.empty").
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty. Emits an error message (including, when secret/field are given, the kubectl command to recover the current value) if the value at valueKey is empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}

+ 5 - 0
clickhouse/charts/common/values.yaml

@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart

+ 21 - 0
clickhouse/charts/zookeeper/.helmignore

@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj

+ 6 - 0
clickhouse/charts/zookeeper/Chart.lock

@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 2.2.4
+digest: sha256:634d19e9b7f6e4c07d7c04a0161ab96b3f83335ebdd70b35b952319ef0a2586b
+generated: "2023-03-19T02:06:13.108650823Z"

+ 25 - 0
clickhouse/charts/zookeeper/Chart.yaml

@@ -0,0 +1,25 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 3.8.1
+dependencies:
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  tags:
+  - bitnami-common
+  version: 2.x.x
+description: Apache ZooKeeper provides a reliable, centralized register of configuration
+  data and services for distributed applications.
+home: https://github.com/bitnami/charts/tree/main/bitnami/zookeeper
+icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png
+keywords:
+- zookeeper
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: zookeeper
+sources:
+- https://github.com/bitnami/containers/tree/main/bitnami/zookeeper
+- https://zookeeper.apache.org/
+version: 11.1.5

+ 524 - 0
clickhouse/charts/zookeeper/README.md

@@ -0,0 +1,524 @@
+<!--- app-name: Apache ZooKeeper -->
+
+# Apache ZooKeeper packaged by Bitnami
+
+Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications.
+
+[Overview of Apache ZooKeeper](https://zookeeper.apache.org)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm repo add my-repo https://charts.bitnami.com/bitnami
+helm install my-release my-repo/zookeeper
+```
+
+## Introduction
+
+This chart bootstraps a [ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm repo add my-repo https://charts.bitnami.com/bitnami
+helm install my-release my-repo/zookeeper
+```
+
+These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name                      | Description                                     | Value |
+| ------------------------- | ----------------------------------------------- | ----- |
+| `global.imageRegistry`    | Global Docker image registry                    | `""`  |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]`  |
+| `global.storageClass`     | Global StorageClass for Persistent Volume(s)    | `""`  |
+
+### Common parameters
+
+| Name                     | Description                                                                                  | Value           |
+| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion`            | Override Kubernetes version                                                                  | `""`            |
+| `nameOverride`           | String to partially override common.names.fullname template (will maintain the release name) | `""`            |
+| `fullnameOverride`       | String to fully override common.names.fullname template                                      | `""`            |
+| `clusterDomain`          | Kubernetes Cluster Domain                                                                    | `cluster.local` |
+| `extraDeploy`            | Extra objects to deploy (evaluated as a template)                                            | `[]`            |
+| `commonLabels`           | Add labels to all the deployed resources                                                     | `{}`            |
+| `commonAnnotations`      | Add annotations to all the deployed resources                                                | `{}`            |
+| `namespaceOverride`      | Override namespace for ZooKeeper resources                                                   | `""`            |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden)      | `false`         |
+| `diagnosticMode.command` | Command to override all containers in the statefulset                                        | `["sleep"]`     |
+| `diagnosticMode.args`    | Args to override all containers in the statefulset                                           | `["infinity"]`  |
+
+### ZooKeeper chart parameters
+
+| Name                          | Description                                                                                                                | Value                   |
+| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `image.registry`              | ZooKeeper image registry                                                                                                   | `docker.io`             |
+| `image.repository`            | ZooKeeper image repository                                                                                                 | `bitnami/zookeeper`     |
+| `image.tag`                   | ZooKeeper image tag (immutable tags are recommended)                                                                       | `3.8.1-debian-11-r15`   |
+| `image.digest`                | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag                  | `""`                    |
+| `image.pullPolicy`            | ZooKeeper image pull policy                                                                                                | `IfNotPresent`          |
+| `image.pullSecrets`           | Specify docker-registry secret names as an array                                                                           | `[]`                    |
+| `image.debug`                 | Specify if debug values should be set                                                                                      | `false`                 |
+| `auth.client.enabled`         | Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5                                                     | `false`                 |
+| `auth.client.clientUser`      | User that ZooKeeper clients will use to authenticate                                                                       | `""`                    |
+| `auth.client.clientPassword`  | Password that ZooKeeper clients will use to authenticate                                                                   | `""`                    |
+| `auth.client.serverUsers`     | Comma, semicolon or whitespace separated list of users to be created                                                       | `""`                    |
+| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created                                 | `""`                    |
+| `auth.client.existingSecret`  | Use existing secret (ignores previous passwords)                                                                           | `""`                    |
+| `auth.quorum.enabled`         | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5                                                     | `false`                 |
+| `auth.quorum.learnerUser`     | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers.                                           | `""`                    |
+| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers.                                       | `""`                    |
+| `auth.quorum.serverUsers`     | Comma, semicolon or whitespace separated list of users for the quorumServers.                                              | `""`                    |
+| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created                                 | `""`                    |
+| `auth.quorum.existingSecret`  | Use existing secret (ignores previous passwords)                                                                           | `""`                    |
+| `tickTime`                    | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats                                                         | `2000`                  |
+| `initLimit`                   | Limits the length of time that the ZooKeeper servers in quorum have to connect to a leader                                 | `10`                    |
+| `syncLimit`                   | How far out of date a server can be from a leader                                                                          | `5`                     |
+| `preAllocSize`                | Block size for transaction log file                                                                                        | `65536`                 |
+| `snapCount`                   | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000`                |
+| `maxClientCnxns`              | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble     | `60`                    |
+| `maxSessionTimeout`           | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate                               | `40000`                 |
+| `heapSize`                    | Size (in MB) for the Java Heap options (Xmx and Xms)                                                                       | `1024`                  |
+| `fourlwCommandsWhitelist`     | A list of comma separated Four Letter Words commands that can be executed                                                  | `srvr, mntr, ruok`      |
+| `minServerId`                 | Minimal SERVER_ID value, nodes increment their IDs respectively                                                            | `1`                     |
+| `listenOnAllIPs`              | Allow ZooKeeper to listen for connections from its peers on all available IP addresses                                     | `false`                 |
+| `autopurge.snapRetainCount`   | The most recent snapshots amount (and corresponding transaction logs) to retain                                            | `3`                     |
+| `autopurge.purgeInterval`     | The time interval (in hours) for which the purge task has to be triggered                                                  | `0`                     |
+| `logLevel`                    | Log level for the ZooKeeper server. ERROR by default                                                                       | `ERROR`                 |
+| `jvmFlags`                    | Default JVM flags for the ZooKeeper process                                                                                | `""`                    |
+| `dataLogDir`                  | Dedicated data log directory                                                                                               | `""`                    |
+| `configuration`               | Configure ZooKeeper with a custom zoo.cfg file                                                                             | `""`                    |
+| `existingConfigmap`           | The name of an existing ConfigMap with your custom configuration for ZooKeeper                                             | `""`                    |
+| `extraEnvVars`                | Array with extra environment variables to add to ZooKeeper nodes                                                           | `[]`                    |
+| `extraEnvVarsCM`              | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes                                                   | `""`                    |
+| `extraEnvVarsSecret`          | Name of existing Secret containing extra env vars for ZooKeeper nodes                                                      | `""`                    |
+| `command`                     | Override default container command (useful when using custom images)                                                       | `["/scripts/setup.sh"]` |
+| `args`                        | Override default container args (useful when using custom images)                                                          | `[]`                    |
+
+### Statefulset parameters
+
+| Name                                                | Description                                                                                                                                                                                       | Value           |
+| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `replicaCount`                                      | Number of ZooKeeper nodes                                                                                                                                                                         | `1`             |
+| `containerPorts.client`                             | ZooKeeper client container port                                                                                                                                                                   | `2181`          |
+| `containerPorts.tls`                                | ZooKeeper TLS container port                                                                                                                                                                      | `3181`          |
+| `containerPorts.follower`                           | ZooKeeper follower container port                                                                                                                                                                 | `2888`          |
+| `containerPorts.election`                           | ZooKeeper election container port                                                                                                                                                                 | `3888`          |
+| `livenessProbe.enabled`                             | Enable livenessProbe on ZooKeeper containers                                                                                                                                                      | `true`          |
+| `livenessProbe.initialDelaySeconds`                 | Initial delay seconds for livenessProbe                                                                                                                                                           | `30`            |
+| `livenessProbe.periodSeconds`                       | Period seconds for livenessProbe                                                                                                                                                                  | `10`            |
+| `livenessProbe.timeoutSeconds`                      | Timeout seconds for livenessProbe                                                                                                                                                                 | `5`             |
+| `livenessProbe.failureThreshold`                    | Failure threshold for livenessProbe                                                                                                                                                               | `6`             |
+| `livenessProbe.successThreshold`                    | Success threshold for livenessProbe                                                                                                                                                               | `1`             |
+| `livenessProbe.probeCommandTimeout`                 | Probe command timeout for livenessProbe                                                                                                                                                           | `2`             |
+| `readinessProbe.enabled`                            | Enable readinessProbe on ZooKeeper containers                                                                                                                                                     | `true`          |
+| `readinessProbe.initialDelaySeconds`                | Initial delay seconds for readinessProbe                                                                                                                                                          | `5`             |
+| `readinessProbe.periodSeconds`                      | Period seconds for readinessProbe                                                                                                                                                                 | `10`            |
+| `readinessProbe.timeoutSeconds`                     | Timeout seconds for readinessProbe                                                                                                                                                                | `5`             |
+| `readinessProbe.failureThreshold`                   | Failure threshold for readinessProbe                                                                                                                                                              | `6`             |
+| `readinessProbe.successThreshold`                   | Success threshold for readinessProbe                                                                                                                                                              | `1`             |
+| `readinessProbe.probeCommandTimeout`                | Probe command timeout for readinessProbe                                                                                                                                                          | `2`             |
+| `startupProbe.enabled`                              | Enable startupProbe on ZooKeeper containers                                                                                                                                                       | `false`         |
+| `startupProbe.initialDelaySeconds`                  | Initial delay seconds for startupProbe                                                                                                                                                            | `30`            |
+| `startupProbe.periodSeconds`                        | Period seconds for startupProbe                                                                                                                                                                   | `10`            |
+| `startupProbe.timeoutSeconds`                       | Timeout seconds for startupProbe                                                                                                                                                                  | `1`             |
+| `startupProbe.failureThreshold`                     | Failure threshold for startupProbe                                                                                                                                                                | `15`            |
+| `startupProbe.successThreshold`                     | Success threshold for startupProbe                                                                                                                                                                | `1`             |
+| `customLivenessProbe`                               | Custom livenessProbe that overrides the default one                                                                                                                                               | `{}`            |
+| `customReadinessProbe`                              | Custom readinessProbe that overrides the default one                                                                                                                                              | `{}`            |
+| `customStartupProbe`                                | Custom startupProbe that overrides the default one                                                                                                                                                | `{}`            |
+| `lifecycleHooks`                                    | for the ZooKeeper container(s) to automate configuration before or after startup                                                                                                                  | `{}`            |
+| `resources.limits`                                  | The resources limits for the ZooKeeper containers                                                                                                                                                 | `{}`            |
+| `resources.requests.memory`                         | The requested memory for the ZooKeeper containers                                                                                                                                                 | `256Mi`         |
+| `resources.requests.cpu`                            | The requested cpu for the ZooKeeper containers                                                                                                                                                    | `250m`          |
+| `podSecurityContext.enabled`                        | Enabled ZooKeeper pods' Security Context                                                                                                                                                          | `true`          |
+| `podSecurityContext.fsGroup`                        | Set ZooKeeper pod's Security Context fsGroup                                                                                                                                                      | `1001`          |
+| `containerSecurityContext.enabled`                  | Enabled ZooKeeper containers' Security Context                                                                                                                                                    | `true`          |
+| `containerSecurityContext.runAsUser`                | Set ZooKeeper containers' Security Context runAsUser                                                                                                                                              | `1001`          |
+| `containerSecurityContext.runAsNonRoot`             | Set ZooKeeper containers' Security Context runAsNonRoot                                                                                                                                           | `true`          |
+| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as nonprivilege                                                                                                                                                 | `false`         |
+| `hostAliases`                                       | ZooKeeper pods host aliases                                                                                                                                                                       | `[]`            |
+| `podLabels`                                         | Extra labels for ZooKeeper pods                                                                                                                                                                   | `{}`            |
+| `podAnnotations`                                    | Annotations for ZooKeeper pods                                                                                                                                                                    | `{}`            |
+| `podAffinityPreset`                                 | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                               | `""`            |
+| `podAntiAffinityPreset`                             | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                          | `soft`          |
+| `nodeAffinityPreset.type`                           | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                         | `""`            |
+| `nodeAffinityPreset.key`                            | Node label key to match Ignored if `affinity` is set.                                                                                                                                             | `""`            |
+| `nodeAffinityPreset.values`                         | Node label values to match. Ignored if `affinity` is set.                                                                                                                                         | `[]`            |
+| `affinity`                                          | Affinity for pod assignment                                                                                                                                                                       | `{}`            |
+| `nodeSelector`                                      | Node labels for pod assignment                                                                                                                                                                    | `{}`            |
+| `tolerations`                                       | Tolerations for pod assignment                                                                                                                                                                    | `[]`            |
+| `topologySpreadConstraints`                         | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template                                                                          | `[]`            |
+| `podManagementPolicy`                               | StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel`      |
+| `priorityClassName`                                 | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand                                                                                   | `""`            |
+| `schedulerName`                                     | Kubernetes pod scheduler registry                                                                                                                                                                 | `""`            |
+| `updateStrategy.type`                               | ZooKeeper statefulset strategy type                                                                                                                                                               | `RollingUpdate` |
+| `updateStrategy.rollingUpdate`                      | ZooKeeper statefulset rolling update configuration parameters                                                                                                                                     | `{}`            |
+| `extraVolumes`                                      | Optionally specify extra list of additional volumes for the ZooKeeper pod(s)                                                                                                                      | `[]`            |
+| `extraVolumeMounts`                                 | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s)                                                                                                           | `[]`            |
+| `sidecars`                                          | Add additional sidecar containers to the ZooKeeper pod(s)                                                                                                                                         | `[]`            |
+| `initContainers`                                    | Add additional init containers to the ZooKeeper pod(s)                                                                                                                                            | `[]`            |
+| `pdb.create`                                        | Deploy a pdb object for the ZooKeeper pod                                                                                                                                                         | `false`         |
+| `pdb.minAvailable`                                  | Minimum available ZooKeeper replicas                                                                                                                                                              | `""`            |
+| `pdb.maxUnavailable`                                | Maximum unavailable ZooKeeper replicas                                                                                                                                                            | `1`             |
+
+### Traffic Exposure parameters
+
+| Name                                        | Description                                                                             | Value       |
+| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- |
+| `service.type`                              | Kubernetes Service type                                                                 | `ClusterIP` |
+| `service.ports.client`                      | ZooKeeper client service port                                                           | `2181`      |
+| `service.ports.tls`                         | ZooKeeper TLS service port                                                              | `3181`      |
+| `service.ports.follower`                    | ZooKeeper follower service port                                                         | `2888`      |
+| `service.ports.election`                    | ZooKeeper election service port                                                         | `3888`      |
+| `service.nodePorts.client`                  | Node port for clients                                                                   | `""`        |
+| `service.nodePorts.tls`                     | Node port for TLS                                                                       | `""`        |
+| `service.disableBaseClientPort`             | Remove client port from service definitions.                                            | `false`     |
+| `service.sessionAffinity`                   | Control where client requests go, to the same pod or round-robin                        | `None`      |
+| `service.sessionAffinityConfig`             | Additional settings for the sessionAffinity                                             | `{}`        |
+| `service.clusterIP`                         | ZooKeeper service Cluster IP                                                            | `""`        |
+| `service.loadBalancerIP`                    | ZooKeeper service Load Balancer IP                                                      | `""`        |
+| `service.loadBalancerSourceRanges`          | ZooKeeper service Load Balancer sources                                                 | `[]`        |
+| `service.externalTrafficPolicy`             | ZooKeeper service external traffic policy                                               | `Cluster`   |
+| `service.annotations`                       | Additional custom annotations for ZooKeeper service                                     | `{}`        |
+| `service.extraPorts`                        | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]`        |
+| `service.headless.annotations`              | Annotations for the Headless Service                                                    | `{}`        |
+| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods         | `true`      |
+| `service.headless.servicenameOverride`      | String to partially override headless service name                                      | `""`        |
+| `networkPolicy.enabled`                     | Specifies whether a NetworkPolicy should be created                                     | `false`     |
+| `networkPolicy.allowExternal`               | Don't require client label for connections                                              | `true`      |
+
+### Other Parameters
+
+| Name                                          | Description                                                            | Value   |
+| --------------------------------------------- | ---------------------------------------------------------------------- | ------- |
+| `serviceAccount.create`                       | Enable creation of ServiceAccount for ZooKeeper pod                    | `false` |
+| `serviceAccount.name`                         | The name of the ServiceAccount to use.                                 | `""`    |
+| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true`  |
+| `serviceAccount.annotations`                  | Additional custom annotations for the ServiceAccount                   | `{}`    |
+
+### Persistence parameters
+
+| Name                                   | Description                                                                    | Value               |
+| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- |
+| `persistence.enabled`                  | Enable ZooKeeper data persistence using PVC. If false, use emptyDir            | `true`              |
+| `persistence.existingClaim`            | Name of an existing PVC to use (only when deploying a single replica)          | `""`                |
+| `persistence.storageClass`             | PVC Storage Class for ZooKeeper data volume                                    | `""`                |
+| `persistence.accessModes`              | PVC Access modes                                                               | `["ReadWriteOnce"]` |
+| `persistence.size`                     | PVC Storage Request for ZooKeeper data volume                                  | `8Gi`               |
+| `persistence.annotations`              | Annotations for the PVC                                                        | `{}`                |
+| `persistence.labels`                   | Labels for the PVC                                                             | `{}`                |
+| `persistence.selector`                 | Selector to match an existing Persistent Volume for ZooKeeper's data PVC       | `{}`                |
+| `persistence.dataLogDir.size`          | PVC Storage Request for ZooKeeper's dedicated data log directory               | `8Gi`               |
+| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""`                |
+| `persistence.dataLogDir.selector`      | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC   | `{}`                |
+
+### Volume Permissions parameters
+
+| Name                                                   | Description                                                                                                                       | Value                   |
+| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled`                            | Enable init container that changes the owner and group of the persistent volume                                                   | `false`                 |
+| `volumePermissions.image.registry`                     | Init container volume-permissions image registry                                                                                  | `docker.io`             |
+| `volumePermissions.image.repository`                   | Init container volume-permissions image repository                                                                                | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag`                          | Init container volume-permissions image tag (immutable tags are recommended)                                                      | `11-debian-11-r98`      |
+| `volumePermissions.image.digest`                       | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""`                    |
+| `volumePermissions.image.pullPolicy`                   | Init container volume-permissions image pull policy                                                                               | `IfNotPresent`          |
+| `volumePermissions.image.pullSecrets`                  | Init container volume-permissions image pull secrets                                                                              | `[]`                    |
+| `volumePermissions.resources.limits`                   | Init container volume-permissions resource limits                                                                                 | `{}`                    |
+| `volumePermissions.resources.requests`                 | Init container volume-permissions resource requests                                                                               | `{}`                    |
+| `volumePermissions.containerSecurityContext.enabled`   | Enabled init container Security Context                                                                                           | `true`                  |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container                                                                                                    | `0`                     |
+
+### Metrics parameters
+
+| Name                                       | Description                                                                           | Value       |
+| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- |
+| `metrics.enabled`                          | Enable Prometheus to access ZooKeeper metrics endpoint                                | `false`     |
+| `metrics.containerPort`                    | ZooKeeper Prometheus Exporter container port                                          | `9141`      |
+| `metrics.service.type`                     | ZooKeeper Prometheus Exporter service type                                            | `ClusterIP` |
+| `metrics.service.port`                     | ZooKeeper Prometheus Exporter service port                                            | `9141`      |
+| `metrics.service.annotations`              | Annotations for Prometheus to auto-discover the metrics endpoint                      | `{}`        |
+| `metrics.serviceMonitor.enabled`           | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator         | `false`     |
+| `metrics.serviceMonitor.namespace`         | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)         | `""`        |
+| `metrics.serviceMonitor.interval`          | Interval at which metrics should be scraped.                                          | `""`        |
+| `metrics.serviceMonitor.scrapeTimeout`     | Timeout after which the scrape is ended                                               | `""`        |
+| `metrics.serviceMonitor.additionalLabels`  | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}`        |
+| `metrics.serviceMonitor.selector`          | Prometheus instance selector labels                                                   | `{}`        |
+| `metrics.serviceMonitor.relabelings`       | RelabelConfigs to apply to samples before scraping                                    | `[]`        |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion                             | `[]`        |
+| `metrics.serviceMonitor.honorLabels`       | Specify honorLabels parameter to add the scrape endpoint                              | `false`     |
+| `metrics.serviceMonitor.jobLabel`          | The name of the label on the target service to use as the job name in prometheus.     | `""`        |
+| `metrics.prometheusRule.enabled`           | Create a PrometheusRule for Prometheus Operator                                       | `false`     |
+| `metrics.prometheusRule.namespace`         | Namespace for the PrometheusRule Resource (defaults to the Release Namespace)         | `""`        |
+| `metrics.prometheusRule.additionalLabels`  | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}`        |
+| `metrics.prometheusRule.rules`             | PrometheusRule definitions                                                            | `[]`        |
+
+### TLS/SSL parameters
+
+| Name                                      | Description                                                                                        | Value                                                                 |
+| ----------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- |
+| `tls.client.enabled`                      | Enable TLS for client connections                                                                  | `false`                                                               |
+| `tls.client.auth`                         | SSL Client auth. Can be "none", "want" or "need".                                                  | `none`                                                                |
+| `tls.client.autoGenerated`                | Generate automatically self-signed TLS certificates for ZooKeeper client communications            | `false`                                                               |
+| `tls.client.existingSecret`               | Name of the existing secret containing the TLS certificates for ZooKeeper client communications    | `""`                                                                  |
+| `tls.client.existingSecretKeystoreKey`    | The secret key from the tls.client.existingSecret containing the Keystore.                         | `""`                                                                  |
+| `tls.client.existingSecretTruststoreKey`  | The secret key from the tls.client.existingSecret containing the Truststore.                       | `""`                                                                  |
+| `tls.client.keystorePath`                 | Location of the KeyStore file used for Client connections                                          | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks`   |
+| `tls.client.truststorePath`               | Location of the TrustStore file used for Client connections                                        | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` |
+| `tls.client.passwordsSecretName`          | Existing secret containing Keystore and truststore passwords                                       | `""`                                                                  |
+| `tls.client.passwordsSecretKeystoreKey`   | The secret key from the tls.client.passwordsSecretName containing the password for the Keystore.   | `""`                                                                  |
+| `tls.client.passwordsSecretTruststoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. | `""`                                                                  |
+| `tls.client.keystorePassword`             | Password to access KeyStore if needed                                                              | `""`                                                                  |
+| `tls.client.truststorePassword`           | Password to access TrustStore if needed                                                            | `""`                                                                  |
+| `tls.quorum.enabled`                      | Enable TLS for quorum protocol                                                                     | `false`                                                               |
+| `tls.quorum.auth`                         | SSL Quorum Client auth. Can be "none", "want" or "need".                                           | `none`                                                                |
+| `tls.quorum.autoGenerated`                | Create self-signed TLS certificates. Currently only supports PEM certificates.                     | `false`                                                               |
+| `tls.quorum.existingSecret`               | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol          | `""`                                                                  |
+| `tls.quorum.existingSecretKeystoreKey`    | The secret key from the tls.quorum.existingSecret containing the Keystore.                         | `""`                                                                  |
+| `tls.quorum.existingSecretTruststoreKey`  | The secret key from the tls.quorum.existingSecret containing the Truststore.                       | `""`                                                                  |
+| `tls.quorum.keystorePath`                 | Location of the KeyStore file used for Quorum protocol                                             | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks`   |
+| `tls.quorum.truststorePath`               | Location of the TrustStore file used for Quorum protocol                                           | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` |
+| `tls.quorum.passwordsSecretName`          | Existing secret containing Keystore and truststore passwords                                       | `""`                                                                  |
+| `tls.quorum.passwordsSecretKeystoreKey`   | The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore.   | `""`                                                                  |
+| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""`                                                                  |
+| `tls.quorum.keystorePassword`             | Password to access KeyStore if needed                                                              | `""`                                                                  |
+| `tls.quorum.truststorePassword`           | Password to access TrustStore if needed                                                            | `""`                                                                  |
+| `tls.resources.limits`                    | The resources limits for the TLS init container                                                    | `{}`                                                                  |
+| `tls.resources.requests`                  | The requested resources for the TLS init container                                                 | `{}`                                                                  |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release \
+  --set auth.clientUser=newUser \
+    my-repo/zookeeper
+```
+
+The above command sets the ZooKeeper user to `newUser`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml my-repo/zookeeper
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Configure log level
+
+You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs.
+
+In order to remove that log noise so levels can be set to 'INFO', two changes must be made.
+
+First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`.
+
+Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes:
+
+```yaml
+livenessProbe:
+  enabled: false
+readinessProbe:
+  enabled: false
+customLivenessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+customReadinessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+```
+
+You can also set the log4j logging level and what log appenders are turned on, by using `ZOO_LOG4J_PROP` set inside of conf/log4j.properties as zookeeper.root.logger by default to
+
+```console
+zookeeper.root.logger=INFO, CONSOLE
+```
+
+The available appenders are:
+
+- CONSOLE
+- ROLLINGFILE
+- RFAAUDIT
+- TRACEFILE
+
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Configure the data log directory
+
+You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string and it will result in the log being written to the data directory (Zookeeper's default behavior).
+
+When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
+### Set pod affinity
+
+This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 11.0.0
+
+This major version removes `commonAnnotations` and `commonLabels` from `volumeClaimTemplates`. Now annotations and labels can be set in volume claims using `persistence.annotations` and `persistence.labels` values. If the previous deployment has already set `commonAnnotations` and/or `commonLabels` values, to ensure a clean upgrade from the previous version without losing data, please set `persistence.annotations` and/or `persistence.labels` values with the same content as the common values.
+
+### To 10.0.0
+
+This new version of the chart adds support for server-server authentication.
+The chart previously supported client-server authentication, to avoid confusion, the previous parameters have been renamed from `auth.*` to `auth.client.*`.
+
+### To 9.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.8.0. Upgrade compatibility is not guaranteed.
+
+### To 8.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Bitnami charts repository.
+
+Affected values:
+
+- `allowAnonymousLogin` is deprecated.
+- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map.
+- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- `podDisruptionBudget.*` parameters are renamed to `pdb.*`.
+
+### To 7.0.0
+
+This new version renames the parameters used to configure TLS for both client and quorum.
+
+- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort`
+- `service.tls.client_port` is renamed to `service.tlsClientPort`
+- `service.tls.client_enable` is renamed to `tls.client.enabled`
+- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath`
+- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath`
+- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword`
+- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword`
+- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled`
+- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath`
+- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath`
+- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword`
+- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword`
+
+### To 6.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 6.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/).
+
+### To 5.21.0
+
+A couple of parameters related to Zookeeper metrics were renamed or removed in favor of new ones:
+
+- `metrics.port` is renamed to `metrics.containerPort`.
+- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
+
+### To 3.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
+of the application, each node will need to have at least one snapshot file created in the data directory. If not, the
+new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
+in order to find ways to workaround this issue in case you are facing it.
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is zookeeper:
+
+```console
+kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+## License
+
+Copyright &copy; 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 22 - 0
clickhouse/charts/zookeeper/charts/common/.helmignore

@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 24 - 0
clickhouse/charts/zookeeper/charts/common/Chart.yaml

@@ -0,0 +1,24 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.2.4
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/main/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 2.2.4

+ 233 - 0
clickhouse/charts/zookeeper/charts/common/README.md

@@ -0,0 +1,233 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright &copy; 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 106 - 0
clickhouse/charts/zookeeper/charts/common/templates/_affinities.tpl

@@ -0,0 +1,106 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" . -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - podAffinityTerm:
+      labelSelector:
+        matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+          {{- if not (empty $component) }}
+          {{ printf "app.kubernetes.io/component: %s" $component }}
+          {{- end }}
+          {{- range $key, $value := $extraMatchLabels }}
+          {{ $key }}: {{ $value | quote }}
+          {{- end }}
+      topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  - labelSelector:
+      matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+        {{- if not (empty $component) }}
+        {{ printf "app.kubernetes.io/component: %s" $component }}
+        {{- end }}
+        {{- range $key, $value := $extraMatchLabels }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+    topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.pods.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.pods.hard" . -}}
+  {{- end -}}
+{{- end -}}

+ 154 - 0
clickhouse/charts/zookeeper/charts/common/templates/_capabilities.tpl

@@ -0,0 +1,154 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+    {{- if .Values.global.kubeVersion }}
+    {{- .Values.global.kubeVersion -}}
+    {{- else }}
+    {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+    {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}"  structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}

+ 23 - 0
clickhouse/charts/zookeeper/charts/common/templates/_errors.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty passwords values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+    {{- $errorString = print $errorString "\n                 Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+    {{- $errorString = print $errorString "\n%s" -}}
+    {{- printf $errorString $validationErrors | fail -}}
+  {{- end -}}
+{{- end -}}

+ 80 - 0
clickhouse/charts/zookeeper/charts/common/templates/_images.tpl

@@ -0,0 +1,80 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+    {{- if .global.imageRegistry }}
+     {{- $registryName = .global.imageRegistry -}}
+    {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+    {{- $separator = "@" -}}
+    {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+    {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+    {{- printf "%s%s%s"  $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+  {{- $pullSecrets := list }}
+
+  {{- if .global }}
+    {{- range .global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+  {{- $pullSecrets := list }}
+  {{- $context := .context }}
+
+  {{- if $context.Values.global }}
+    {{- range $context.Values.global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}

+ 68 - 0
clickhouse/charts/zookeeper/charts/common/templates/_ingress.tpl

@@ -0,0 +1,68 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+  - serviceName - String. Name of an existing service backend
+  - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+  name: {{ .serviceName }}
+  port:
+    {{- if typeIs "string" .servicePort }}
+    name: {{ .servicePort }}
+    {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+    number: {{ .servicePort | int }}
+    {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}

+ 18 - 0
clickhouse/charts/zookeeper/charts/common/templates/_labels.tpl

@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}

+ 66 - 0
clickhouse/charts/zookeeper/charts/common/templates/_names.tpl

@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}

+ 165 - 0
clickhouse/charts/zookeeper/charts/common/templates/_secrets.tpl

@@ -0,0 +1,165 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+  {{- if not (typeIs "string" .existingSecret) -}}
+    {{- if .existingSecret.keyMapping -}}
+      {{- $key = index .existingSecret.keyMapping $.key -}}
+    {{- end -}}
+  {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+  - length - int - Optional - Length of the generated random password.
+  - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+  - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+  - context - Context - Required - Parent context.
+
+The order in which this function returns a secret password:
+  1. Already existing 'Secret' resource
+     (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+  2. Password provided via the values.yaml
+     (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+  3. Randomly generated secret password
+     (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key | quote }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+  - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+  {{- $value = index $secretData .key -}}
+{{- else -}}
+  {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}

+ 23 - 0
clickhouse/charts/zookeeper/charts/common/templates/_storage.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+    {{- if .global.storageClass -}}
+        {{- $storageClass = .global.storageClass -}}
+    {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+      {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+      {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}

+ 13 - 0
clickhouse/charts/zookeeper/charts/common/templates/_tplvalues.tpl

@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}

+ 62 - 0
clickhouse/charts/zookeeper/charts/common/templates/_utils.tpl

@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+  {{- $fieldNameSplit := splitList "-" .field -}}
+  {{- $upperCaseFieldNameSplit := list -}}
+
+  {{- range $fieldNameSplit -}}
+    {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+  {{- end -}}
+
+  {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given its key path, e.g: "path.to.key"
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+  {{- if not $latestObj -}}
+    {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+  {{- end -}}
+  {{- $value = ( index $latestObj . ) -}}
+  {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}} 
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+  {{- if $value -}}
+    {{- $key = . }}
+  {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}} 
+{{- end -}}

+ 14 - 0
clickhouse/charts/zookeeper/charts/common/templates/_warnings.tpl

@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}

+ 72 - 0
clickhouse/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl

@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+  {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+  {{- $enabled := include "common.cassandra.values.enabled" . -}}
+  {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+  {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.dbUser.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.cassandra.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+  {{- if .subchart -}}
+    cassandra.dbUser
+  {{- else -}}
+    dbUser
+  {{- end -}}
+{{- end -}}

+ 103 - 0
clickhouse/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+  {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mariadb.values.enabled" . -}}
+  {{- $architecture := include "common.mariadb.values.architecture" . -}}
+  {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mariadb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mariadb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}

+ 108 - 0
clickhouse/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl

@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MongoDB&reg; values are stored, e.g: "mongodb-passwords-secret"
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+  {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mongodb.values.enabled" . -}}
+  {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+  {{- $architecture := include "common.mongodb.values.architecture" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+  {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+  {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+    {{- if and $valueUsername $valueDatabase -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replicaset") -}}
+        {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}

+ 103 - 0
clickhouse/charts/zookeeper/charts/common/templates/validations/_mysql.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+  {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mysql.values.enabled" . -}}
+  {{- $architecture := include "common.mysql.values.architecture" . -}}
+  {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mysql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+  {{- if .subchart -}}
+    mysql.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}

+ 129 - 0
clickhouse/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl

@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+        {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}

+ 76 - 0
clickhouse/charts/zookeeper/charts/common/templates/validations/_redis.tpl

@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardized auth structure (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}

+ 46 - 0
clickhouse/charts/zookeeper/charts/common/templates/validations/_validations.tpl

@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}

+ 5 - 0
clickhouse/charts/zookeeper/charts/common/values.yaml

@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart

+ 76 - 0
clickhouse/charts/zookeeper/templates/NOTES.txt

@@ -0,0 +1,76 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if and (not .Values.auth.client.enabled) (eq .Values.service.type "LoadBalancer") }}
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "serviceType=LoadBalancer" and not specifying "auth.client.enabled=true"
+    you have most likely exposed the ZooKeeper service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As alternative, you can also specify a valid password on the
+    "auth.client.clientPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+    /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh
+
+{{- else }}
+
+ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }}
+
+To connect to your ZooKeeper server run the following commands:
+
+    export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}")
+    kubectl exec -it $POD_NAME -- zkCli.sh
+
+To connect to your ZooKeeper server from outside the cluster execute the following commands:
+
+{{- if eq .Values.service.type "NodePort" }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+    zkCli.sh $NODE_IP:$NODE_PORT
+
+{{- else if eq .Values.service.type "LoadBalancer" }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }}
+
+{{- else if eq .Values.service.type "ClusterIP" }}
+
+    kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPorts.client }} &
+    zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }}
+
+{{- end }}
+{{- end }}
+
+{{- include "zookeeper.validateValues" . }}
+{{- include "zookeeper.checkRollingTags" . }}

+ 361 - 0
clickhouse/charts/zookeeper/templates/_helpers.tpl

@@ -0,0 +1,361 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the proper ZooKeeper image name
+*/}}
+{{- define "zookeeper.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "zookeeper.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "zookeeper.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Check if there are rolling tags in the images
+*/}}
+{{- define "zookeeper.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- end -}}
+
+{{/*
+Return ZooKeeper Namespace to use
+*/}}
+{{- define "zookeeper.namespace" -}}
+{{- if .Values.namespaceOverride -}}
+    {{- .Values.namespaceOverride -}}
+{{- else -}}
+    {{- .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "zookeeper.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the ZooKeeper client-server authentication credentials secret
+*/}}
+{{- define "zookeeper.client.secretName" -}}
+{{- if .Values.auth.client.existingSecret -}}
+    {{- printf "%s" (tpl .Values.auth.client.existingSecret $) -}}
+{{- else -}}
+    {{- printf "%s-client-auth" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the ZooKeeper server-server authentication credentials secret
+*/}}
+{{- define "zookeeper.quorum.secretName" -}}
+{{- if .Values.auth.quorum.existingSecret -}}
+    {{- printf "%s" (tpl .Values.auth.quorum.existingSecret $) -}}
+{{- else -}}
+    {{- printf "%s-quorum-auth" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a ZooKeeper client-server authentication credentials secret object should be created
+*/}}
+{{- define "zookeeper.client.createSecret" -}}
+{{- if and .Values.auth.client.enabled (empty .Values.auth.client.existingSecret) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a ZooKeeper server-server authentication credentials secret object should be created
+*/}}
+{{- define "zookeeper.quorum.createSecret" -}}
+{{- if and .Values.auth.quorum.enabled (empty .Values.auth.quorum.existingSecret) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the available value for certain key in an existing secret (if it exists),
+otherwise it generates a random value.
+Params (dict): Namespace, Name, Key - Required. Length - Optional, default 16.
+NOTE(review): `lookup` returns an empty map during `helm template` and
+`helm install/upgrade --dry-run`, so this falls through to a fresh random
+value on every dry-run render — confirm callers tolerate that.
+*/}}
+{{- define "getValueFromSecret" }}
+    {{- $len := (default 16 .Length) | int -}}
+    {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}}
+    {{- if $obj }}
+        {{- index $obj .Key | b64dec -}}
+    {{- else -}}
+        {{- randAlphaNum $len -}}
+    {{- end -}}
+{{- end }}
+
+{{/*
+Return the ZooKeeper configuration ConfigMap name
+*/}}
+{{- define "zookeeper.configmapName" -}}
+{{- if .Values.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a ConfigMap object should be created for ZooKeeper configuration
+*/}}
+{{- define "zookeeper.createConfigmap" -}}
+{{- if and .Values.configuration (not .Values.existingConfigmap) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret should be created for ZooKeeper quorum
+*/}}
+{{- define "zookeeper.quorum.createTlsSecret" -}}
+{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing ZooKeeper quorum TLS certificates
+*/}}
+{{- define "zookeeper.quorum.tlsSecretName" -}}
+{{- $secretName := .Values.tls.quorum.existingSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-quorum-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum
+*/}}
+{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}}
+{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Keystore and Truststore password
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordsSecret" -}}
+{{- $secretName := .Values.tls.quorum.passwordsSecretName -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret should be created for ZooKeeper client
+*/}}
+{{- define "zookeeper.client.createTlsSecret" -}}
+{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing ZooKeeper client TLS certificates
+*/}}
+{{- define "zookeeper.client.tlsSecretName" -}}
+{{- $secretName := .Values.tls.client.existingSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-client-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsKeystoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.existingSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsTruststoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.existingSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsKeystoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.client.existingSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsTruststoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.client.existingSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client
+*/}}
+{{- define "zookeeper.client.createTlsPasswordsSecret" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Keystore and Truststore password
+*/}}
+{{- define "zookeeper.client.tlsPasswordsSecret" -}}
+{{- $secretName := .Values.tls.client.passwordsSecretName -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore password key to be retrieved from tls.quorum.passwordSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore password key to be retrieved from tls.quorum.passwordSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore password key to be retrieved from tls.client.passwordSecretName.
+*/}}
+{{- define "zookeeper.client.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.client.passwordsSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore password key to be retrieved from tls.client.passwordSecretName.
+*/}}
+{{- define "zookeeper.client.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.client.passwordsSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "zookeeper.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{-   printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.client.auth" -}}
+{{- if and .Values.auth.client.enabled (not .Values.auth.client.existingSecret) (or (not .Values.auth.client.clientUser) (not .Values.auth.client.serverUsers)) }}
+zookeeper: auth.client.enabled
+    In order to enable client-server authentication, you need to provide the list
+    of users to be created and the user to use for clients authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.auth" -}}
+{{- if and .Values.auth.quorum.enabled (not .Values.auth.quorum.existingSecret) (or (not .Values.auth.quorum.learnerUser) (not .Values.auth.quorum.serverUsers)) }}
+zookeeper: auth.quorum.enabled
+    In order to enable server-server authentication, you need to provide the list
+    of users to be created and the user to use for quorum authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.client.tls" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }}
+zookeeper: tls.client.enabled
+    In order to enable Client TLS encryption, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Quorum TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.tls" -}}
+{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }}
+zookeeper: tls.quorum.enabled
+    In order to enable Quorum TLS, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}

+ 17 - 0
clickhouse/charts/zookeeper/templates/configmap.yaml

@@ -0,0 +1,17 @@
+{{- if (include "zookeeper.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  zoo.cfg: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }}
+{{- end }}

+ 4 - 0
clickhouse/charts/zookeeper/templates/extra-list.yaml

@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}

+ 29 - 0
clickhouse/charts/zookeeper/templates/metrics-svc.yaml

@@ -0,0 +1,29 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "common.names.fullname" . }}-metrics
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.metrics.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.metrics.service.type }}
+  ports:
+    - name: tcp-metrics
+      port: {{ .Values.metrics.service.port }}
+      targetPort: metrics
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+{{- end }}

+ 41 - 0
clickhouse/charts/zookeeper/templates/networkpolicy.yaml

@@ -0,0 +1,41 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+  policyTypes:
+    - Ingress
+  ingress:
+    # Allow inbound connections to ZooKeeper
+    - ports:
+        - port: {{ .Values.containerPorts.client }}
+        {{- if .Values.metrics.enabled }}
+        - port: {{ .Values.metrics.containerPort }}
+        {{- end }}
+      {{- if not .Values.networkPolicy.allowExternal }}
+      from:
+        - podSelector:
+            matchLabels:
+              {{ include "common.names.fullname" . }}-client: "true"
+        - podSelector:
+            matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+      {{- end }}
+    # Allow internal communications between nodes
+    - ports:
+        - port: {{ .Values.containerPorts.follower }}
+        - port: {{ .Values.containerPorts.election }}
+      from:
+        - podSelector:
+            matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+{{- end }}

+ 26 - 0
clickhouse/charts/zookeeper/templates/pdb.yaml

@@ -0,0 +1,26 @@
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and .Values.pdb.create (gt $replicaCount 1) }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.pdb.minAvailable }}
+  minAvailable: {{ .Values.pdb.minAvailable }}
+  {{- end  }}
+  {{- if .Values.pdb.maxUnavailable }}
+  maxUnavailable: {{ .Values.pdb.maxUnavailable }}
+  {{- end  }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: zookeeper
+{{- end }}

+ 27 - 0
clickhouse/charts/zookeeper/templates/prometheusrule.yaml

@@ -0,0 +1,27 @@
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  {{- if .Values.metrics.prometheusRule.namespace }}
+  namespace: {{ .Values.metrics.prometheusRule.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.metrics.prometheusRule.additionalLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  groups:
+    - name: {{ include "common.names.fullname" . }}
+      rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }}
+{{- end }}
+

+ 102 - 0
clickhouse/charts/zookeeper/templates/scripts-configmap.yaml

@@ -0,0 +1,102 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  init-certs.sh: |-
+    #!/bin/bash
+
+    {{- if .Values.tls.client.enabled }}
+    if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then
+        if [[ -f "/opt/bitnami/zookeeper/config/certs/client/.initialized" ]]; then
+            exit 0
+        fi
+        openssl pkcs12 -export -in "/certs/client/tls.crt" \
+          -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+          -inkey "/certs/client/tls.key" \
+          -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+          -srcstoretype PKCS12 \
+          -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+          -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+          -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/client/ca.crt" \
+              -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \
+              -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" \
+              -noprompt
+        touch /opt/bitnami/zookeeper/config/certs/client/.initialized
+    {{- if .Values.tls.client.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks"
+        cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+    {{- if .Values.tls.quorum.enabled }}
+    if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then
+        openssl pkcs12 -export -in "/certs/quorum/tls.crt" \
+          -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+          -inkey "/certs/quorum/tls.key" \
+          -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+          -srcstoretype PKCS12 \
+          -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+          -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+          -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/quorum/ca.crt" \
+              -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \
+              -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \
+              -noprompt
+    {{- if .Values.tls.quorum.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when encryption Quorum via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks"
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+  setup.sh: |-
+    #!/bin/bash
+
+    # Execute entrypoint as usual after obtaining ZOO_SERVER_ID
+    # check ZOO_SERVER_ID in persistent volume via myid
+    # if not present, set based on POD hostname
+    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
+        export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
+    else
+        HOSTNAME="$(hostname -s)"
+        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+            ORD=${BASH_REMATCH[2]}
+            # Server ID = StatefulSet pod ordinal + configured minServerId offset
+            export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))"
+        else
+            # Bug fix: message previously referenced unset $HOST instead of $HOSTNAME
+            echo "Failed to get index from hostname $HOSTNAME"
+            exit 1
+        fi
+    fi
+    exec /entrypoint.sh /run.sh

+ 77 - 0
clickhouse/charts/zookeeper/templates/secrets.yaml

@@ -0,0 +1,77 @@
+{{- if (include "zookeeper.client.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-client-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  client-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "client-password" "providedValues" (list "auth.client.clientPassword") "context" $) }}
+  server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "server-password" "providedValues" (list "auth.client.serverPasswords") "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-quorum-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  quorum-learner-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-learner-password" "providedValues" (list "auth.quorum.learnerPassword") "context" $) }}
+  quorum-server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-server-password" "providedValues" (list "auth.quorum.serverPasswords") "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}-client-tls-pass
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  # NOTE(review): when no password is provided in values, randAlphaNum generates
+  # a NEW password on every render/upgrade — confirm this rotation is intended.
+  keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }}
+  truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}-quorum-tls-pass
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  # NOTE(review): when no password is provided in values, randAlphaNum generates
+  # a NEW password on every render/upgrade — confirm this rotation is intended.
+  keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }}
+  truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }}
+{{- end }}

+ 21 - 0
clickhouse/charts/zookeeper/templates/serviceaccount.yaml

@@ -0,0 +1,21 @@
+{{- /* ServiceAccount for the ZooKeeper pods; rendered only when serviceAccount.create is true.
+       Merges chart-wide commonLabels/commonAnnotations with serviceAccount.annotations. */}}
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "zookeeper.serviceAccountName" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    role: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.serviceAccount.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}

+ 53 - 0
clickhouse/charts/zookeeper/templates/servicemonitor.yaml

@@ -0,0 +1,53 @@
+{{- /* Prometheus Operator ServiceMonitor scraping the tcp-metrics port of the metrics Service.
+       Requires both metrics.enabled and metrics.serviceMonitor.enabled. */}}
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  {{- /* The ServiceMonitor itself may live in a different namespace (e.g. the Prometheus one). */}}
+  {{- if .Values.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.metrics.serviceMonitor.additionalLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.metrics.serviceMonitor.jobLabel }}
+  jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+  {{- end }}
+  {{- /* Selects the metrics Service only: matchLabels plus component=metrics. */}}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      {{- if .Values.metrics.serviceMonitor.selector }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+      {{- end }}
+      app.kubernetes.io/component: metrics
+  endpoints:
+    - port: tcp-metrics
+      path: "/metrics"
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.relabelings }}
+      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.honorLabels }}
+      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+      {{- end }}
+  {{- /* Targets are always discovered in the chart's own namespace, regardless of
+         where the ServiceMonitor object itself was placed above. */}}
+  namespaceSelector:
+    matchNames:
+      - {{ template "zookeeper.namespace" . }}
+{{- end }}

+ 532 - 0
clickhouse/charts/zookeeper/templates/statefulset.yaml

@@ -0,0 +1,532 @@
+{{- /* ZooKeeper StatefulSet: the core workload of the chart. Pod template wires auth,
+       TLS, metrics and probes from values; volumeClaimTemplates provision data /
+       data-log PVCs when persistence is enabled and no existing claim is given. */}}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    role: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  podManagementPolicy: {{ .Values.podManagementPolicy }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: zookeeper
+  {{- /* Governing headless Service name; honors service.headless.servicenameOverride. */}}
+  serviceName: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }}
+  {{- if .Values.updateStrategy }}
+  updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }}
+  {{- end }}
+  template:
+    metadata:
+      annotations:
+        {{- if .Values.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+        {{- /* Checksum annotations force a rolling restart when config/secret content changes. */}}
+        {{- if (include "zookeeper.createConfigmap" .) }}
+        checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if or (include "zookeeper.quorum.createSecret" .) (include "zookeeper.client.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+        checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }}
+        checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }}
+        {{- end }}
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: zookeeper
+        {{- if .Values.podLabels }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
+        {{- end }}
+    spec:
+      serviceAccountName: {{ template "zookeeper.serviceAccountName" . }}
+      {{- include "zookeeper.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- /* Explicit affinity wins; otherwise build from the pod/node affinity presets. */}}
+      {{- if .Values.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.priorityClassName }}
+      priorityClassName: {{ .Values.priorityClassName }}
+      {{- end }}
+      {{- if .Values.schedulerName }}
+      schedulerName: {{ .Values.schedulerName }}
+      {{- end }}
+      {{- if .Values.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      initContainers:
+        {{- /* chown the persistent volumes so the (non-root) zookeeper container can write them. */}}
+        {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+        - name: volume-permissions
+          image: {{ template "zookeeper.volumePermissions.image" . }}
+          imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+          args:
+            - -ec
+            - |
+              mkdir -p /bitnami/zookeeper
+              chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/zookeeper
+              find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}
+              {{- if .Values.dataLogDir }}
+              mkdir -p {{ .Values.dataLogDir }}
+              chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.dataLogDir }}
+              find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}
+              {{- end }}
+          {{- if .Values.volumePermissions.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/zookeeper
+            {{- if .Values.dataLogDir }}
+            - name: data-log
+              mountPath: {{ .Values.dataLogDir }}
+            {{- end }}
+        {{- end }}
+        {{- /* Builds keystores/truststores from the mounted cert secrets into shared emptyDirs. */}}
+        {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }}
+        - name: init-certs
+          image: {{ include "zookeeper.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /scripts/init-certs.sh
+          env:
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+            {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }}
+            - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }}
+            - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+            {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+            - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }}
+            - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+          {{- if .Values.tls.resources }}
+          resources: {{- toYaml .Values.tls.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: scripts
+              mountPath: /scripts/init-certs.sh
+              subPath: init-certs.sh
+            {{- /* NOTE(review): `or` with a single operand (here and below) is redundant;
+                   plain `if .Values.tls.client.enabled` would be equivalent. */}}
+            {{- if or .Values.tls.client.enabled }}
+            - name: client-certificates
+              mountPath: /certs/client
+            - name: client-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/client
+            {{- end }}
+            {{- if or .Values.tls.quorum.enabled }}
+            - name: quorum-certificates
+              mountPath: /certs/quorum
+            - name: quorum-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/quorum
+            {{- end }}
+        {{- end }}
+        {{- if .Values.initContainers }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: zookeeper
+          image: {{ template "zookeeper.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: ZOO_DATA_LOG_DIR
+              value: {{ .Values.dataLogDir | quote }}
+            - name: ZOO_PORT_NUMBER
+              value: {{ .Values.containerPorts.client | quote }}
+            - name: ZOO_TICK_TIME
+              value: {{ .Values.tickTime | quote }}
+            - name: ZOO_INIT_LIMIT
+              value: {{ .Values.initLimit | quote }}
+            - name: ZOO_SYNC_LIMIT
+              value: {{ .Values.syncLimit | quote }}
+            - name: ZOO_PRE_ALLOC_SIZE
+              value: {{ .Values.preAllocSize | quote }}
+            - name: ZOO_SNAPCOUNT
+              value: {{ .Values.snapCount | quote }}
+            - name: ZOO_MAX_CLIENT_CNXNS
+              value: {{ .Values.maxClientCnxns | quote }}
+            - name: ZOO_4LW_COMMANDS_WHITELIST
+              value: {{ .Values.fourlwCommandsWhitelist | quote }}
+            - name: ZOO_LISTEN_ALLIPS_ENABLED
+              value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }}
+            - name: ZOO_AUTOPURGE_INTERVAL
+              value: {{ .Values.autopurge.purgeInterval | quote }}
+            - name: ZOO_AUTOPURGE_RETAIN_COUNT
+              value: {{ .Values.autopurge.snapRetainCount | quote }}
+            - name: ZOO_MAX_SESSION_TIMEOUT
+              value: {{ .Values.maxSessionTimeout | quote }}
+            {{- /* Ensemble list: one "<pod>.<headless-svc>.<ns>.svc.<domain>:follower:election::id"
+                   entry per replica; server ids start at minServerId. */}}
+            - name: ZOO_SERVERS
+              {{- $replicaCount := int .Values.replicaCount }}
+              {{- $minServerId := int .Values.minServerId }}
+              {{- $followerPort := int .Values.containerPorts.follower }}
+              {{- $electionPort := int .Values.containerPorts.election }}
+              {{- $releaseNamespace := include "zookeeper.namespace" . }}
+              {{- $zookeeperFullname := include "common.names.fullname" . }}
+              {{- /* NOTE(review): this hardcodes the "headless" suffix and ignores
+                     .Values.service.headless.servicenameOverride, unlike spec.serviceName above
+                     and the headless Service template — when the override is set, ZOO_SERVERS
+                     would point at a nonexistent DNS name. Confirm and align. */}}
+              {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63  }}
+              {{- $clusterDomain := .Values.clusterDomain }}
+              value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }}
+            - name: ZOO_ENABLE_AUTH
+              value: {{ ternary "yes" "no" .Values.auth.client.enabled | quote }}
+            {{- if .Values.auth.client.enabled }}
+            - name: ZOO_CLIENT_USER
+              value: {{ .Values.auth.client.clientUser | quote }}
+            - name: ZOO_CLIENT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.secretName" . }}
+                  key: client-password
+            - name: ZOO_SERVER_USERS
+              value: {{ .Values.auth.client.serverUsers | quote }}
+            - name: ZOO_SERVER_PASSWORDS
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.secretName" . }}
+                  key: server-password
+            {{- end }}
+            - name: ZOO_ENABLE_QUORUM_AUTH
+              value: {{ ternary "yes" "no" .Values.auth.quorum.enabled | quote }}
+            {{- if .Values.auth.quorum.enabled }}
+            - name: ZOO_QUORUM_LEARNER_USER
+              value: {{ .Values.auth.quorum.learnerUser | quote }}
+            - name: ZOO_QUORUM_LEARNER_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.secretName" . }}
+                  key: quorum-learner-password
+            - name: ZOO_QUORUM_SERVER_USERS
+              value: {{ .Values.auth.quorum.serverUsers | quote }}
+            - name: ZOO_QUORUM_SERVER_PASSWORDS
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.secretName" . }}
+                  key: quorum-server-password
+            {{- end }}
+            - name: ZOO_HEAP_SIZE
+              value: {{ .Values.heapSize | quote }}
+            - name: ZOO_LOG_LEVEL
+              value: {{ .Values.logLevel | quote }}
+            - name: ALLOW_ANONYMOUS_LOGIN
+              value: {{ ternary "no" "yes" .Values.auth.client.enabled | quote }}
+            {{- if .Values.jvmFlags }}
+            - name: JVMFLAGS
+              value: {{ .Values.jvmFlags | quote }}
+            {{- end }}
+            {{- if .Values.metrics.enabled }}
+            - name: ZOO_ENABLE_PROMETHEUS_METRICS
+              value: "yes"
+            - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER
+              value: {{ .Values.metrics.containerPort | quote }}
+            {{- end }}
+            {{- if .Values.tls.client.enabled }}
+            - name: ZOO_TLS_PORT_NUMBER
+              value: {{ .Values.containerPorts.tls | quote }}
+            - name: ZOO_TLS_CLIENT_ENABLE
+              value: {{ .Values.tls.client.enabled | quote }}
+            - name: ZOO_TLS_CLIENT_AUTH
+              value: {{ .Values.tls.client.auth | quote }}
+            - name: ZOO_TLS_CLIENT_KEYSTORE_FILE
+              value: {{ .Values.tls.client.keystorePath | quote }}
+            - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE
+              value: {{ .Values.tls.client.truststorePath | quote }}
+            {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }}
+            - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }}
+            {{- end }}
+            {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }}
+            - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+            {{- end }}
+            {{- if .Values.tls.quorum.enabled }}
+            - name: ZOO_TLS_QUORUM_ENABLE
+              value: {{ .Values.tls.quorum.enabled | quote }}
+            - name: ZOO_TLS_QUORUM_CLIENT_AUTH
+              value: {{ .Values.tls.quorum.auth | quote }}
+            - name: ZOO_TLS_QUORUM_KEYSTORE_FILE
+              value: {{ .Values.tls.quorum.keystorePath | quote }}
+            - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE
+              value: {{ .Values.tls.quorum.truststorePath | quote }}
+            {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }}
+            - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }}
+            {{- end }}
+            {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }}
+            - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+            {{- end }}
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+            {{- if .Values.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }}
+            {{- end }}
+            {{- if .Values.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }}
+            {{- end }}
+          {{- end }}
+          ports:
+            {{- if not .Values.service.disableBaseClientPort }}
+            - name: client
+              containerPort: {{ .Values.containerPorts.client }}
+            {{- end }}
+            {{- if .Values.tls.client.enabled }}
+            - name: client-tls
+              containerPort: {{ .Values.containerPorts.tls }}
+            {{- end }}
+            - name: follower
+              containerPort: {{ .Values.containerPorts.follower }}
+            - name: election
+              containerPort: {{ .Values.containerPorts.election }}
+            {{- if .Values.metrics.enabled }}
+            - name: metrics
+              containerPort: {{ .Values.metrics.containerPort }}
+            {{- end }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }}
+            exec:
+              {{- /* "ruok"/"imok" four-letter-word health check. */}}
+              {{- if not .Values.service.disableBaseClientPort }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok']
+              {{- /* NOTE(review): this branch fires when the base client port is disabled AND
+                     tls.client.enabled is false, yet it probes the TLS port with openssl
+                     s_client — looks contradictory. Presumably the condition was meant to
+                     distinguish the client-auth (mTLS) mode instead; confirm. */}}
+              {{- else if not .Values.tls.client.enabled }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok']
+              {{- else }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok']
+              {{- end }}
+          {{- end }}
+          {{- if .Values.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }}
+            exec:
+              {{- if not .Values.service.disableBaseClientPort }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok']
+              {{- /* NOTE(review): same suspicious condition as the liveness probe above. */}}
+              {{- else if not .Values.tls.client.enabled }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok']
+              {{- else }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok']
+              {{- end }}
+          {{- end }}
+          {{- if .Values.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
+          {{- else if .Values.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }}
+            tcpSocket:
+              {{- if not .Values.service.disableBaseClientPort }}
+              port: client
+              {{- else }}
+              port: follower
+              {{- end }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: scripts
+              mountPath: /scripts/setup.sh
+              subPath: setup.sh
+            - name: data
+              mountPath: /bitnami/zookeeper
+            {{- if .Values.dataLogDir }}
+            - name: data-log
+              mountPath: {{ .Values.dataLogDir }}
+            {{- end }}
+            {{- if or .Values.configuration .Values.existingConfigmap }}
+            - name: config
+              mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg
+              subPath: zoo.cfg
+            {{- end }}
+            {{- if .Values.tls.client.enabled }}
+            - name: client-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/client
+              readOnly: true
+            {{- end }}
+            {{- if .Values.tls.quorum.enabled }}
+            - name: quorum-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/quorum
+              readOnly: true
+            {{- end }}
+            {{- if .Values.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }}
+        {{- end }}
+      volumes:
+        - name: scripts
+          configMap:
+            name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+            defaultMode: 0755
+        {{- if or .Values.configuration .Values.existingConfigmap }}
+        - name: config
+          configMap:
+            name: {{ include "zookeeper.configmapName" . }}
+        {{- end }}
+        {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}
+        - name: data
+          persistentVolumeClaim:
+            claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }}
+        {{- else if not .Values.persistence.enabled }}
+        - name: data
+          emptyDir: {}
+        {{- end }}
+        {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }}
+        - name: data-log
+          persistentVolumeClaim:
+            claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }}
+        {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }}
+        - name: data-log
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.tls.client.enabled }}
+        - name: client-certificates
+          secret:
+            secretName: {{ include "zookeeper.client.tlsSecretName" . }}
+            {{- /* 256 decimal = 0400 octal (owner read-only). */}}
+            defaultMode: 256
+        - name: client-shared-certs
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.tls.quorum.enabled }}
+        - name: quorum-certificates
+          secret:
+            secretName: {{ include "zookeeper.quorum.tlsSecretName" . }}
+            defaultMode: 256
+        - name: quorum-shared-certs
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.extraVolumes }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
+        {{- end }}
+  {{- /* PVC templates: only for volumes without an existingClaim. */}}
+  {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }}
+  volumeClaimTemplates:
+    {{- if not .Values.persistence.existingClaim }}
+    - metadata:
+        name: data
+        {{- if .Values.persistence.annotations }}
+        annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
+        {{- end }}
+        {{- if .Values.persistence.labels }}
+        labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+        {{- range .Values.persistence.accessModes }}
+          - {{ . | quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.persistence.size | quote }}
+        {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }}
+        {{- if .Values.persistence.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }}
+        {{- end }}
+    {{- end }}
+    {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }}
+    - metadata:
+        name: data-log
+        {{- if .Values.persistence.annotations }}
+        annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
+        {{- end }}
+        {{- if .Values.persistence.labels }}
+        labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+        {{- range .Values.persistence.accessModes }}
+          - {{ . | quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.persistence.dataLogDir.size | quote }}
+        {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }}
+        {{- if .Values.persistence.dataLogDir.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }}
+        {{- end }}
+    {{- end }}
+  {{- end }}

+ 42 - 0
clickhouse/charts/zookeeper/templates/svc-headless.yaml

@@ -0,0 +1,42 @@
+{{- /*
+Headless Service for the ZooKeeper ensemble.
+clusterIP: None makes this headless: it creates one DNS record per pod
+(<pod>.<svc>.<ns>.svc.<clusterDomain>), which the pods use to reach each
+other on the follower and election ports.
+*/}}
+apiVersion: v1
+kind: Service
+metadata:
+  {{- /* Name defaults to "<fullname>-headless"; service.headless.servicenameOverride replaces the "headless" suffix. Kept within the 63-char DNS label limit. */}}
+  name: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- /* Merge headless-specific annotations with the chart-wide commonAnnotations; the key is only emitted when at least one is set. */}}
+  {{- if or .Values.commonAnnotations .Values.service.headless.annotations }}
+  annotations:
+    {{- if .Values.service.headless.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  {{- /* When true (the chart default), DNS records are published for pods that are not yet Ready, so peers can resolve each other during ensemble bootstrap. */}}
+  publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }}
+  ports:
+    {{- /* The plaintext client port can be removed entirely via service.disableBaseClientPort (e.g. TLS-only deployments). */}}
+    {{- if not .Values.service.disableBaseClientPort }}
+    - name: tcp-client
+      port: {{ .Values.service.ports.client }}
+      targetPort: client
+    {{- end }}
+    {{- if .Values.tls.client.enabled }}
+    - name: tcp-client-tls
+      port: {{ .Values.service.ports.tls }}
+      targetPort: client-tls
+    {{- end }}
+    {{- /* Follower and election ports carry server-to-server quorum traffic and are always exposed. */}}
+    - name: tcp-follower
+      port: {{ .Values.service.ports.follower }}
+      targetPort: follower
+    - name: tcp-election
+      port: {{ .Values.service.ports.election }}
+      targetPort: election
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper

+ 71 - 0
clickhouse/charts/zookeeper/templates/svc.yaml

@@ -0,0 +1,71 @@
+{{- /*
+Client-facing Service for ZooKeeper.
+Exposes the client (and optionally TLS) ports to consumers; follower and
+election ports are also listed for in-cluster access. Supports ClusterIP,
+NodePort and LoadBalancer via service.type.
+*/}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.commonAnnotations .Values.service.annotations }}
+  annotations:
+    {{- if .Values.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.service.type }}
+  {{- /* A fixed clusterIP is only valid for ClusterIP-type services. */}}
+  {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }}
+  clusterIP: {{ .Values.service.clusterIP }}
+  {{- end }}
+  {{- /* externalTrafficPolicy only applies to NodePort/LoadBalancer services. */}}
+  {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }}
+  loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.service.sessionAffinity }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinityConfig }}
+  sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }}
+  {{- end }}
+  ports:
+    {{- /* Plaintext client port; omitted entirely when service.disableBaseClientPort is set. */}}
+    {{- if not .Values.service.disableBaseClientPort }}
+    - name: tcp-client
+      port: {{ .Values.service.ports.client }}
+      targetPort: client
+      {{- /* Pin the nodePort if requested; explicitly null it for ClusterIP so a stray value is never set. */}}
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }}
+      nodePort: {{ .Values.service.nodePorts.client }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- if .Values.tls.client.enabled }}
+    - name: tcp-client-tls
+      port: {{ .Values.service.ports.tls }}
+      targetPort: client-tls
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }}
+      nodePort: {{ .Values.service.nodePorts.tls }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    - name: tcp-follower
+      port: {{ .Values.service.ports.follower }}
+      targetPort: follower
+    - name: tcp-election
+      port: {{ .Values.service.ports.election }}
+      targetPort: election
+    {{- /* Extra user-defined ports, typically for sidecar containers. */}}
+    {{- if .Values.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper

+ 57 - 0
clickhouse/charts/zookeeper/templates/tls-secrets.yaml

@@ -0,0 +1,57 @@
+{{- /*
+Auto-generated TLS secrets for ZooKeeper client and quorum traffic.
+A 365-day CA and leaf certificate are generated at template time; on upgrades,
+"common.secrets.lookup" re-reads the existing Secret's keys so the material is
+not regenerated (and connections broken) on every release.
+
+Fix: the headless service name used in the certificate SANs now honors
+service.headless.servicenameOverride, matching the name actually created by
+svc-headless.yaml — otherwise the cert would not cover the per-pod DNS names
+when the override is set, and TLS hostname verification would fail.
+*/}}
+{{- if (include "zookeeper.client.createTlsSecret" .) }}
+{{- $secretName := printf "%s-client-crt" (include "common.names.fullname" .) }}
+{{- $ca := genCA "zookeeper-client-ca" 365 }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $serviceName := include "common.names.fullname" . }}
+{{- /* Must stay in sync with the name computed in svc-headless.yaml. */}}
+{{- $headlessServiceName := printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }}
+{{- /* SANs cover per-pod (wildcard) and service DNS names, plus loopback for local clients. */}}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }}
+{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  {{- /* Reuse existing secret data when present; fall back to the freshly generated material. */}}
+  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createTlsSecret" .) }}
+{{- $secretName := printf "%s-quorum-crt" (include "common.names.fullname" .) }}
+{{- $ca := genCA "zookeeper-quorum-ca" 365 }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $serviceName := include "common.names.fullname" . }}
+{{- /* Must stay in sync with the name computed in svc-headless.yaml. */}}
+{{- $headlessServiceName := printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }}
+{{- /* Quorum traffic is strictly pod-to-pod, so no loopback SANs are included. */}}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }}
+{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}

+ 879 - 0
clickhouse/charts/zookeeper/values.yaml

@@ -0,0 +1,879 @@
+## @section Global parameters
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+  imageRegistry: ""
+  ## E.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Extra objects to deploy (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## @param namespaceOverride Override namespace for ZooKeeper resources
+## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent
+##
+namespaceOverride: ""
+
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the statefulset
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the statefulset
+  ##
+  args:
+    - infinity
+
+## @section ZooKeeper chart parameters
+
+## Bitnami ZooKeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+## @param image.registry ZooKeeper image registry
+## @param image.repository ZooKeeper image repository
+## @param image.tag ZooKeeper image tag (immutable tags are recommended)
+## @param image.digest ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy ZooKeeper image pull policy
+## @param image.pullSecrets Specify docker-registry secret names as an array
+## @param image.debug Specify if debug values should be set
+##
+image:
+  registry: docker.io
+  repository: bitnami/zookeeper
+  tag: 3.8.1-debian-11-r15
+  digest: ""
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Set to true if you would like to see extra information on logs
+  ##
+  debug: false
+## Authentication parameters
+##
+auth:
+  client:
+    ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.client.clientUser User that ZooKeeper clients will use to authenticate
+    ##
+    clientUser: ""
+    ## @param auth.client.clientPassword Password that ZooKeeper clients will use to authenticate
+    ##
+    clientPassword: ""
+    ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of user to be created
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.client.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
+  quorum:
+    ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ## Note: Make sure the user is included in auth.quorum.serverUsers
+    ##
+    learnerUser: ""
+    ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ##
+    learnerPassword: ""
+    ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers.
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
+## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+## @param initLimit ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+## @param syncLimit How far out of date a server can be from a leader
+##
+syncLimit: 5
+## @param preAllocSize Block size for transaction log file
+##
+preAllocSize: 65536
+## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled)
+##
+snapCount: 100000
+## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate
+## Defaults to 20 times the tickTime
+##
+maxSessionTimeout: 40000
+## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms)
+## This env var is ignored if Xmx an Xms are configured via `jvmFlags`
+##
+heapSize: 1024
+## @param fourlwCommandsWhitelist A list of comma separated Four Letter Words commands that can be executed
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+## @param minServerId Minimal SERVER_ID value, nodes increment their IDs respectively
+## Servers increment their ID starting at this minimal value.
+## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively.
+##
+minServerId: 1
+## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses
+##
+listenOnAllIPs: false
+## Ongoing data directory cleanup configuration
+##
+autopurge:
+  ## @param autopurge.snapRetainCount The most recent snapshots amount (and corresponding transaction logs) to retain
+  ##
+  snapRetainCount: 3
+  ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered
+  ## Set to a positive integer to enable the auto purging
+  ##
+  purgeInterval: 0
+## @param logLevel Log level for the ZooKeeper server. ERROR by default
+## Keep in mind that setting it to INFO or WARN will make the readiness probe produce a lot of logs
+##
+logLevel: ERROR
+## @param jvmFlags Default JVM flags for the ZooKeeper process
+##
+jvmFlags: ""
+## @param dataLogDir Dedicated data log directory
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## E.g.
+## dataLogDir: /bitnami/zookeeper/dataLog
+##
+dataLogDir: ""
+## @param configuration Configure ZooKeeper with a custom zoo.cfg file
+## e.g:
+## configuration: |-
+##   deploy-working-dir=/bitnami/geode/data
+##   log-level=info
+##   ...
+##
+configuration: ""
+## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper
+## NOTE: When it's set the `configuration` parameter is ignored
+##
+existingConfigmap: ""
+## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes
+## e.g:
+## extraEnvVars:
+##   - name: FOO
+##     value: "bar"
+##
+extraEnvVars: []
+## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes
+##
+extraEnvVarsCM: ""
+## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes
+##
+extraEnvVarsSecret: ""
+## @param command Override default container command (useful when using custom images)
+##
+command:
+  - /scripts/setup.sh
+## @param args Override default container args (useful when using custom images)
+##
+args: []
+
+## @section Statefulset parameters
+
+## @param replicaCount Number of ZooKeeper nodes
+##
+replicaCount: 1
+## @param containerPorts.client ZooKeeper client container port
+## @param containerPorts.tls ZooKeeper TLS container port
+## @param containerPorts.follower ZooKeeper follower container port
+## @param containerPorts.election ZooKeeper election container port
+##
+containerPorts:
+  client: 2181
+  tls: 3181
+  follower: 2888
+  election: 3888
+## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers
+## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+## @param livenessProbe.periodSeconds Period seconds for livenessProbe
+## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
+## @param livenessProbe.successThreshold Success threshold for livenessProbe
+## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers
+## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+## @param readinessProbe.periodSeconds Period seconds for readinessProbe
+## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
+## @param readinessProbe.successThreshold Success threshold for readinessProbe
+## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe
+##
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers
+## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+## @param startupProbe.periodSeconds Period seconds for startupProbe
+## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
+## @param startupProbe.failureThreshold Failure threshold for startupProbe
+## @param startupProbe.successThreshold Success threshold for startupProbe
+##
+startupProbe:
+  enabled: false
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 1
+  failureThreshold: 15
+  successThreshold: 1
+## @param customLivenessProbe Custom livenessProbe that overrides the default one
+##
+customLivenessProbe: {}
+## @param customReadinessProbe Custom readinessProbe that overrides the default one
+##
+customReadinessProbe: {}
+## @param customStartupProbe Custom startupProbe that overrides the default one
+##
+customStartupProbe: {}
+## @param lifecycleHooks for the ZooKeeper container(s) to automate configuration before or after startup
+##
+lifecycleHooks: {}
+## ZooKeeper resource requests and limits
+## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resources limits for the ZooKeeper containers
+## @param resources.requests.memory The requested memory for the ZooKeeper containers
+## @param resources.requests.cpu The requested cpu for the ZooKeeper containers
+##
+resources:
+  limits: {}
+  requests:
+    memory: 256Mi
+    cpu: 250m
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enabled ZooKeeper pods' Security Context
+## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enabled ZooKeeper containers' Security Context
+## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot
+## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as nonprivilege
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  allowPrivilegeEscalation: false
+## @param hostAliases ZooKeeper pods host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param podLabels Extra labels for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Annotations for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ##
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set.
+  ## E.g.
+  ## key: "kubernetes.io/e2e-az-name"
+  ##
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel`
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand
+## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+priorityClassName: ""
+## @param schedulerName Kubernetes pod scheduler registry
+## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param updateStrategy.type ZooKeeper statefulset strategy type
+## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate: {}
+## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumes:
+## - name: zookeeper-keystore
+##   secret:
+##     defaultMode: 288
+##     secretName: zookeeper-keystore
+## - name: zookeeper-truststore
+##   secret:
+##     defaultMode: 288
+##     secretName: zookeeper-truststore
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumeMounts:
+## - name: zookeeper-keystore
+##   mountPath: /certs/keystore
+##   readOnly: true
+## - name: zookeeper-truststore
+##   mountPath: /certs/truststore
+##   readOnly: true
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s)
+## e.g:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the ZooKeeper pod(s)
+## Example:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+initContainers: []
+## ZooKeeper Pod Disruption Budget
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+## @param pdb.create Deploy a pdb object for the ZooKeeper pod
+## @param pdb.minAvailable Minimum available ZooKeeper replicas
+## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas
+##
+pdb:
+  create: false
+  minAvailable: ""
+  maxUnavailable: 1
+
+## @section Traffic Exposure parameters
+
+service:
+  ## @param service.type Kubernetes Service type
+  ##
+  type: ClusterIP
+  ## @param service.ports.client ZooKeeper client service port
+  ## @param service.ports.tls ZooKeeper TLS service port
+  ## @param service.ports.follower ZooKeeper follower service port
+  ## @param service.ports.election ZooKeeper election service port
+  ##
+  ports:
+    client: 2181
+    tls: 3181
+    follower: 2888
+    election: 3888
+  ## Node ports to expose
+  ## NOTE: choose port between <30000-32767>
+  ## @param service.nodePorts.client Node port for clients
+  ## @param service.nodePorts.tls Node port for TLS
+  ##
+  nodePorts:
+    client: ""
+    tls: ""
+  ## @param service.disableBaseClientPort Remove client port from service definitions.
+  ##
+  disableBaseClientPort: false
+  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
+  ## Values: ClientIP or None
+  ## ref: https://kubernetes.io/docs/user-guide/services/
+  ##
+  sessionAffinity: None
+  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
+  ## sessionAffinityConfig:
+  ##   clientIP:
+  ##     timeoutSeconds: 300
+  ##
+  sessionAffinityConfig: {}
+  ## @param service.clusterIP ZooKeeper service Cluster IP
+  ## e.g.:
+  ## clusterIP: None
+  ##
+  clusterIP: ""
+  ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP
+  ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  loadBalancerIP: ""
+  ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ## e.g:
+  ## loadBalancerSourceRanges:
+  ##   - 10.10.10.0/24
+  ##
+  loadBalancerSourceRanges: []
+  ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy
+  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## @param service.annotations Additional custom annotations for ZooKeeper service
+  ##
+  annotations: {}
+  ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value)
+  ##
+  extraPorts: []
+  ## @param service.headless.annotations Annotations for the Headless Service
+  ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods
+  ## @param service.headless.servicenameOverride String to partially override headless service name
+  ##
+  headless:
+    publishNotReadyAddresses: true
+    annotations: {}
+    servicenameOverride: ""
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is
+  ## listening on. When true, ZooKeeper accepts connections from any source (with the correct destination port).
+  ##
+  allowExternal: true
+
+## @section Other Parameters
+
+## Service account for ZooKeeper to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod
+  ##
+  create: false
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the common.names.fullname template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+
+## @section Persistence parameters
+
+## Enable persistence using Persistent Volume Claims
+## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir
+  ##
+  enabled: true
+  ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica)
+  ##
+  existingClaim: ""
+  ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  storageClass: ""
+  ## @param persistence.accessModes PVC Access modes
+  ##
+  accessModes:
+    - ReadWriteOnce
+  ## @param persistence.size PVC Storage Request for ZooKeeper data volume
+  ##
+  size: 8Gi
+  ## @param persistence.annotations Annotations for the PVC
+  ##
+  annotations: {}
+  ## @param persistence.labels Labels for the PVC
+  ##
+  labels: {}
+  ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC
+  ## If set, the PVC can't have a PV dynamically provisioned for it
+  ## E.g.
+  ## selector:
+  ##   matchLabels:
+  ##     app: my-app
+  ##
+  selector: {}
+  ## Persistence for a dedicated data log directory
+  ##
+  dataLogDir:
+    ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory
+    ##
+    size: 8Gi
+    ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory
+    ## If defined, PVC must be created manually before volume will be bound
+    ## The value is evaluated as a template
+    ##
+    existingClaim: ""
+    ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC
+    ## If set, the PVC can't have a PV dynamically provisioned for it
+    ## E.g.
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 11-debian-11-r98
+    digest: ""
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container' Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.enabled Enabled init container Security Context
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 0
+
+## @section Metrics parameters
+##
+
+## ZooKeeper Prometheus Exporter configuration
+##
+metrics:
+  ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint
+  ##
+  enabled: false
+  ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port
+  ##
+  containerPort: 9141
+  ## Service configuration
+  ##
+  service:
+    ## @param metrics.service.type ZooKeeper Prometheus Exporter service type
+    ##
+    type: ClusterIP
+    ## @param metrics.service.port ZooKeeper Prometheus Exporter service port
+    ##
+    port: 9141
+    ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "{{ .Values.metrics.service.port }}"
+      prometheus.io/path: "/metrics"
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
+    ##
+    jobLabel: ""
+  ## Prometheus Operator PrometheusRule configuration
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ##  - alert: ZooKeeperSyncedFollowers
+    ##    annotations:
+    ##      message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+    ##    expr: max(synced_followers{service="my-release-metrics"}) < 2
+    ##    for: 5m
+    ##    labels:
+    ##      severity: critical
+    ##  - alert: ZooKeeperOutstandingRequests
+    ##    annotations:
+    ##      message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster as a whole.
+    ##    expr: outstanding_requests{service="my-release-metrics"} > 10
+    ##    for: 5m
+    ##    labels:
+    ##      severity: critical
+    ##
+    rules: []
+
+## @section TLS/SSL parameters
+##
+
+## Enable SSL/TLS encryption
+##
+tls:
+  client:
+    ## @param tls.client.enabled Enable TLS for client connections
+    ##
+    enabled: false
+    ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need".
+    ##
+    auth: "none"
+    ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications
+    ## Currently only supports PEM certificates
+    ##
+    autoGenerated: false
+    ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications
+    ##
+    existingSecret: ""
+    ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore.
+    ##
+    existingSecretKeystoreKey: ""
+    ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore.
+    ##
+    existingSecretTruststoreKey: ""
+    ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections
+    ##
+    keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
+    ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections
+    ##
+    truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
+    ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords
+    ##
+    passwordsSecretName: ""
+    ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore.
+    ##
+    passwordsSecretKeystoreKey: ""
+    ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore.
+    ##
+    passwordsSecretTruststoreKey: ""
+    ## @param tls.client.keystorePassword Password to access KeyStore if needed
+    ##
+    keystorePassword: ""
+    ## @param tls.client.truststorePassword Password to access TrustStore if needed
+    ##
+    truststorePassword: ""
+  quorum:
+    ## @param tls.quorum.enabled Enable TLS for quorum protocol
+    ##
+    enabled: false
+    ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need".
+    ##
+    auth: "none"
+    ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates.
+    ##
+    autoGenerated: false
+    ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol
+    ##
+    existingSecret: ""
+    ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore.
+    ##
+    existingSecretKeystoreKey: ""
+    ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore.
+    ##
+    existingSecretTruststoreKey: ""
+    ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol
+    ##
+    keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
+    ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol
+    ##
+    truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
+    ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords
+    ##
+    passwordsSecretName: ""
+    ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore.
+    ##
+    passwordsSecretKeystoreKey: ""
+    ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore.
+    ##
+    passwordsSecretTruststoreKey: ""
+    ## @param tls.quorum.keystorePassword Password to access KeyStore if needed
+    ##
+    keystorePassword: ""
+    ## @param tls.quorum.truststorePassword Password to access TrustStore if needed
+    ##
+    truststorePassword: ""
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param tls.resources.limits The resources limits for the TLS init container
+  ## @param tls.resources.requests The requested resources for the TLS init container
+  ##
+  resources:
+    limits: {}
+    requests: {}

+ 58 - 0
clickhouse/templates/NOTES.txt

@@ -0,0 +1,58 @@
+CHART NAME: {{ .Chart.Name  }}
+CHART VERSION: {{ .Chart.Version  }}
+APP VERSION: {{ .Chart.AppVersion  }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ include "common.names.namespace" . | quote }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ include "common.names.namespace" . | quote }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+    /opt/bitnami/scripts/clickhouse/entrypoint.sh /opt/bitnami/scripts/clickhouse/run.sh
+
+{{- else }}
+
+ClickHouse is available in the following address:
+
+{{- if .Values.externalAccess.enabled }}
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+
+    kubectl get svc --namespace {{ template "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.fullname" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=clickhouse" -w
+
+{{- else if (eq "LoadBalancer" .Values.service.type) }}
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ template "common.names.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+
+{{- else if (eq "NodePort" .Values.service.type)}}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ template "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ template "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+
+{{- else if (eq "ClusterIP" .Values.service.type)}}
+
+    kubectl port-forward --namespace {{ template "common.names.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.tcp }}:9000 &
+
+{{- end }}
+
+Credentials:
+
+    echo "Username      : {{ .Values.auth.username }}"
+    echo "Password      : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "clickhouse.secretName" . }} -o jsonpath="{.data.{{ include "clickhouse.secretKey" .}}}" | base64 -d)"
+
+{{- end }}
+
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "clickhouse.validateValues" . }}

+ 202 - 0
clickhouse/templates/_helpers.tpl

@@ -0,0 +1,202 @@
+{{/*
+Return the proper ClickHouse image name
+*/}}
+{{- define "clickhouse.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "clickhouse.volumePermissions.image" -}}
+{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "clickhouse.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "clickhouse.createTlsSecret" -}}
+{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the TLS certificates.
+*/}}
+{{- define "clickhouse.tlsSecretName" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- else -}}
+    {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the TLS certificate file.
+Uses the auto-generated location when tls.autoGenerated is set; otherwise
+requires the user-provided tls.certFilename.
+*/}}
+{{- define "clickhouse.tlsCert" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "/opt/bitnami/clickhouse/certs/tls.crt" -}}
+{{- else -}}
+    {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/clickhouse/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the TLS certificate key file.
+Uses the auto-generated location when tls.autoGenerated is set; otherwise
+requires the user-provided tls.certKeyFilename.
+*/}}
+{{- define "clickhouse.tlsCertKey" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "/opt/bitnami/clickhouse/certs/tls.key" -}}
+{{- else -}}
+    {{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/clickhouse/certs/%s" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "clickhouse.tlsCACert" -}}
+{{- if .Values.tls.autoGenerated }}
+    {{- printf "/opt/bitnami/clickhouse/certs/ca.crt" -}}
+{{- else -}}
+    {{- printf "/opt/bitnami/clickhouse/certs/%s" .Values.tls.certCAFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the ClickHouse configuration configmap.
+*/}}
+{{- define "clickhouse.configmapName" -}}
+{{- if .Values.existingOverridesConfigmap -}}
+    {{- .Values.existingOverridesConfigmap -}}
+{{- else }}
+    {{- printf "%s" (include "common.names.fullname" . ) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the ClickHouse extra configuration configmap.
+*/}}
+{{- define "clickhouse.extraConfigmapName" -}}
+{{- if .Values.extraOverridesConfigmap -}}
+    {{- .Values.extraOverridesConfigmap -}}
+{{- else }}
+    {{- printf "%s-extra" (include "common.names.fullname" . ) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the Clickhouse password secret name
+*/}}
+{{- define "clickhouse.secretName" -}}
+{{- if .Values.auth.existingSecret -}}
+    {{- .Values.auth.existingSecret -}}
+{{- else }}
+    {{- printf "%s" (include "common.names.fullname" . ) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the ClickHouse password key inside the secret
+*/}}
+{{- define "clickhouse.secretKey" -}}
+{{- if .Values.auth.existingSecret -}}
+    {{- .Values.auth.existingSecretKey -}}
+{{- else }}
+    {{- print "admin-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the startup scripts Secret name.
+*/}}
+{{- define "clickhouse.startdbScriptsSecret" -}}
+{{- if .Values.startdbScriptsSecret -}}
+    {{- printf "%s" (tpl .Values.startdbScriptsSecret $) -}}
+{{- else -}}
+    {{- printf "%s-start-scripts" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts Secret name.
+*/}}
+{{- define "clickhouse.initdbScriptsSecret" -}}
+{{- if .Values.initdbScriptsSecret -}}
+    {{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}}
+{{- else -}}
+    {{- printf "%s-init-scripts" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the ClickHouse headless service.
+*/}}
+{{- define "clickhouse.headlessServiceName" -}}
+{{-  printf "%s-headless" (include "common.names.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "clickhouse.zookeeper.fullname" -}}
+{{- include "common.names.dependency.fullname" (dict "chartName" "zookeeper" "chartValues" .Values.zookeeper "context" $) -}}
+{{- end -}}
+
+{{/*
+Return the name of the ZooKeeper headless service.
+*/}}
+{{- define "clickhouse.zookeeper.headlessServiceName" -}}
+{{-  printf "%s-headless" (include "clickhouse.zookeeper.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "clickhouse.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "clickhouse.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "clickhouse.validateValues.zookeeper" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{-   printf "\nVALUES VALIDATION:\n%s" $message -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ClickHouse - [Zoo]keeper.
+Exactly one coordination backend may be configured: ClickHouse Keeper, the
+bundled ZooKeeper subchart, or an external ZooKeeper. A multi-instance
+deployment (shards/replicas) requires one of them to be present.
+*/}}
+{{- define "clickhouse.validateValues.zookeeper" -}}
+{{- if or (and .Values.keeper.enabled .Values.zookeeper.enabled) (and .Values.keeper.enabled .Values.externalZookeeper.servers) (and .Values.zookeeper.enabled .Values.externalZookeeper.servers) -}}
+clickhouse: Multiple [Zoo]keeper
+    You can only use one [Zoo]keeper backend at a time.
+    Please choose between using ClickHouse Keeper (--set keeper.enabled=true),
+    installing a ZooKeeper chart (--set zookeeper.enabled=true) or
+    using an external instance (--set externalZookeeper.servers)
+{{- end -}}
+{{- if and (not .Values.keeper.enabled) (not .Values.zookeeper.enabled) (not .Values.externalZookeeper.servers) (ne (int .Values.shards) 1) (ne (int .Values.replicaCount) 1) -}}
+clickhouse: No [Zoo]keeper
+    If you are deploying more than one ClickHouse instance, you need to enable [Zoo]keeper.
+    Please choose between using ClickHouse Keeper (--set keeper.enabled=true),
+    installing a ZooKeeper chart (--set zookeeper.enabled=true) or
+    using an external instance (--set externalZookeeper.servers)
+{{- end -}}
+{{- end -}}

+ 18 - 0
clickhouse/templates/configmap-extra.yaml

@@ -0,0 +1,18 @@
+{{- if and .Values.extraOverrides (not .Values.extraOverridesConfigmap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-extra" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  01_extra_overrides.xml: |
+    {{- include "common.tplvalues.render" (dict "value" .Values.extraOverrides "context" $) | nindent 4 }}
+{{- end }}

+ 18 - 0
clickhouse/templates/configmap.yaml

@@ -0,0 +1,18 @@
+{{- if not .Values.existingOverridesConfigmap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  00_default_overrides.xml: |
+    {{- include "common.tplvalues.render" (dict "value" .Values.defaultConfigurationOverrides "context" $) | nindent 4 }}
+{{- end }}

+ 4 - 0
clickhouse/templates/extra-list.yaml

@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}

+ 45 - 0
clickhouse/templates/ingress-tls-secrets.yaml

@@ -0,0 +1,45 @@
+{{- if .Values.ingress.enabled }}
+{{- if .Values.ingress.secrets }}
+{{- range .Values.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .name }}
+  namespace: {{ $.Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" $ | nindent 4 }}
+    {{- if $.Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if $.Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ .certificate | b64enc }}
+  tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
+{{- if and .Values.ingress.tls .Values.ingress.selfSigned }}
+{{- $secretName := printf "%s-tls" .Values.ingress.hostname }}
+{{- $ca := genCA "clickhouse-ca" 365 }}
+{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}
+{{- end }}

+ 60 - 0
clickhouse/templates/ingress.yaml

@@ -0,0 +1,60 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
+kind: Ingress
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.ingress.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
+  ingressClassName: {{ .Values.ingress.ingressClassName | quote }}
+  {{- end }}
+  rules:
+    {{- if .Values.ingress.hostname }}
+    - host: {{ .Values.ingress.hostname | quote }}
+      http:
+        paths:
+          {{- if .Values.ingress.extraPaths }}
+          {{- toYaml .Values.ingress.extraPaths | nindent 10 }}
+          {{- end }}
+          - path: {{ .Values.ingress.path }}
+            {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
+            pathType: {{ .Values.ingress.pathType }}
+            {{- end }}
+            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "http" "context" $)  | nindent 14 }}
+    {{- end }}
+    {{- range .Values.ingress.extraHosts }}
+    - host: {{ .name | quote }}
+      http:
+        paths:
+          - path: {{ default "/" .path }}
+            {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
+            pathType: {{ default "ImplementationSpecific" .pathType }}
+            {{- end }}
+            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }}
+    {{- end }}
+    {{- if .Values.ingress.extraRules }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }}
+    {{- end }}
+  {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }}
+  tls:
+    {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }}
+    - hosts:
+        - {{ .Values.ingress.hostname | quote }}
+      secretName: {{ printf "%s-tls" .Values.ingress.hostname }}
+    {{- end }}
+    {{- if .Values.ingress.extraTls }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+{{- end }}

+ 17 - 0
clickhouse/templates/init-scripts-secret.yaml

@@ -0,0 +1,17 @@
+{{- if and .Values.initdbScripts (not .Values.initdbScriptsSecret) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+stringData:
+{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }}
+{{- end }}

+ 32 - 0
clickhouse/templates/scripts-configmap.yaml

@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  setup.sh: |-
+    #!/bin/bash
+
+    # Derive KEEPER_SERVER_ID before handing off to the regular entrypoint:
+    # reuse the id persisted in the data volume (myid) when present, otherwise
+    # take the trailing ordinal from the StatefulSet pod hostname.
+    if [[ -f "/bitnami/clickhouse/keeper/data/myid" ]]; then
+        export KEEPER_SERVER_ID="$(cat /bitnami/clickhouse/keeper/data/myid)"
+    else
+        HOSTNAME="$(hostname -s)"
+        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+            export KEEPER_SERVER_ID=${BASH_REMATCH[2]}
+        else
+            echo "Failed to get index from hostname $HOSTNAME"
+            exit 1
+        fi
+    fi
+    exec /opt/bitnami/scripts/clickhouse/entrypoint.sh /opt/bitnami/scripts/clickhouse/run.sh -- --listen_host=0.0.0.0

+ 18 - 0
clickhouse/templates/secret.yaml

@@ -0,0 +1,18 @@
+{{- if not .Values.auth.existingSecret }}
+{{- /*
+Credentials Secret, rendered only when the user has not supplied
+.Values.auth.existingSecret. The "common.secrets.passwords.manage" helper
+presumably reuses an already-generated "admin-password" on upgrade and takes
+auth.password when provided -- confirm against
+charts/common/templates/_secrets.tpl.
+*/}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  admin-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "admin-password" "providedValues" (list "auth.password") "context" $) }}
+{{- end }}

+ 22 - 0
clickhouse/templates/service-account.yaml

@@ -0,0 +1,22 @@
+{{- if .Values.serviceAccount.create }}
+{{- /*
+ServiceAccount used by the ClickHouse pods; rendered only when
+.Values.serviceAccount.create is true. Annotations merge the chart-wide
+commonAnnotations with the serviceAccount-specific ones.
+*/}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "clickhouse.serviceAccountName" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.serviceAccount.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.serviceAccount.annotations "context" $) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}

+ 152 - 0
clickhouse/templates/service-external-access.yaml

@@ -0,0 +1,152 @@
+{{- if $.Values.externalAccess.enabled }}
+{{- $shards := $.Values.shards | int }}
+{{- $replicas := $.Values.replicaCount | int }}
+{{- $totalNodes := mul $shards $replicas }}
+{{- range $shard, $e := until $shards }}
+{{- range $i, $_e := until $replicas }}
+{{- $targetPod := printf "%s-shard%d-%d" (include "common.names.fullname" $) $shard $i }}{{- /* Global node index: the lists below are sized against $totalNodes, so indexing by the replica counter $i alone would collide across shards */}}{{- $index := add (mul $shard $replicas) $i }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-external" $targetPod | trunc 63 | trimSuffix "-" }}
+  namespace: {{ $.Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" $ | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    pod: {{ $targetPod }}
+    {{- if $.Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if $.Values.externalAccess.service.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" $.Values.externalAccess.service.labels "context" $) | nindent 4 }}
+    {{- end }}
+  {{- if or $.Values.externalAccess.service.annotations $.Values.commonAnnotations $.Values.externalAccess.service.loadBalancerAnnotations }}
+  annotations:
+    {{- if and (not (empty $.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $.Values.externalAccess.service.loadBalancerAnnotations) $totalNodes) }}
+    {{ include "common.tplvalues.render" ( dict "value" (index $.Values.externalAccess.service.loadBalancerAnnotations $index) "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if $.Values.externalAccess.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" $.Values.externalAccess.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if $.Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ $.Values.externalAccess.service.type }}
+  {{- if eq $.Values.externalAccess.service.type "LoadBalancer" }}
+  {{- if and (not (empty $.Values.externalAccess.service.loadBalancerIPs)) (eq (len $.Values.externalAccess.service.loadBalancerIPs) $totalNodes) }}
+  loadBalancerIP: {{ index $.Values.externalAccess.service.loadBalancerIPs $index }}
+  {{- end }}
+  {{- if $.Values.externalAccess.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges: {{- toYaml $.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }}
+  {{- end }}
+  {{- end }}
+  ports:
+    - name: http
+      port: {{ $.Values.externalAccess.service.ports.http }}
+      targetPort: http
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.http) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.http $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- if $.Values.tls.enabled }}
+    - name: https
+      port: {{ $.Values.externalAccess.service.ports.https }}
+      targetPort: https
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.https) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.https $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- if $.Values.metrics.enabled }}
+    - name: http-metrics
+      port: {{ $.Values.externalAccess.service.ports.metrics }}
+      targetPort: http-metrics
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.metrics) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.metrics $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    - name: tcp
+      port: {{ $.Values.externalAccess.service.ports.tcp }}
+      targetPort: tcp
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.tcp) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.tcp $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- if $.Values.tls.enabled }}
+    - name: tcp-secure
+      port: {{ $.Values.externalAccess.service.ports.tcpSecure }}
+      targetPort: tcp-secure
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.tcpSecure) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.tcpSecure $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- if $.Values.keeper.enabled }}
+    - name: tcp-keeper
+      port: {{ $.Values.externalAccess.service.ports.keeper }}
+      targetPort: tcp-keeper
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.keeper) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeper $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    - name: tcp-keeperinter
+      port: {{ $.Values.externalAccess.service.ports.keeperInter }}
+      targetPort: tcp-keeperinter
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.keeperInter) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeperInter $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- if $.Values.tls.enabled }}
+    - name: tcp-keepertls
+      port: {{ $.Values.externalAccess.service.ports.keeperSecure }}
+      targetPort: tcp-keepertls
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.keeperSecure) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeperSecure $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- end }}
+    - name: tcp-mysql
+      port: {{ $.Values.externalAccess.service.ports.mysql }}
+      targetPort: tcp-mysql
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.mysql) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.mysql $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    - name: tcp-postgresql
+      port: {{ $.Values.externalAccess.service.ports.postgresql }}
+      targetPort: tcp-postgresql
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.postgresql) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.postgresql $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    - name: tcp-intersrv
+      port: {{ $.Values.externalAccess.service.ports.interserver }}
+      targetPort: tcp-intersrv
+      {{- if not (empty $.Values.externalAccess.service.nodePorts.interserver) }}
+      nodePort: {{ index $.Values.externalAccess.service.nodePorts.interserver $index }}
+      {{- else }}
+      nodePort: null
+      {{- end }}
+    {{- if $.Values.externalAccess.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" $.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" $ | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    statefulset.kubernetes.io/pod-name: {{ $targetPod }}
+---
+{{- end }}
+{{- end }}
+{{- end }}

+ 71 - 0
clickhouse/templates/service-headless.yaml

@@ -0,0 +1,71 @@
+{{- /*
+Headless Service backing the ClickHouse StatefulSets: clusterIP None gives each
+pod a stable DNS record, and publishNotReadyAddresses exposes pods before they
+pass readiness (presumably required for cluster/keeper bootstrap -- confirm).
+NOTE(review): the interserver port is named "http-intersrv" here but
+"tcp-intersrv" in service-external-access.yaml; confirm which name matches the
+container port declared in statefulset.yaml.
+*/}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "clickhouse.headlessServiceName" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.service.headless.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.service.headless.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.service.headless.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+    - name: http
+      targetPort: http
+      port: {{ .Values.service.ports.http }}
+      protocol: TCP
+    - name: tcp
+      targetPort: tcp
+      port: {{ .Values.service.ports.tcp }}
+      protocol: TCP
+    {{- if .Values.tls.enabled }}
+    - name: tcp-secure
+      targetPort: tcp-secure
+      port: {{ .Values.service.ports.tcpSecure }}
+      protocol: TCP
+    {{- end }}
+    {{- if .Values.keeper.enabled }}
+    - name: tcp-keeper
+      targetPort: tcp-keeper
+      port: {{ .Values.service.ports.keeper }}
+      protocol: TCP
+    - name: tcp-keeperinter
+      targetPort: tcp-keeperinter
+      port: {{ .Values.service.ports.keeperInter }}
+      protocol: TCP
+    {{- if .Values.tls.enabled }}
+    - name: tcp-keepertls
+      targetPort: tcp-keepertls
+      port: {{ .Values.service.ports.keeperSecure }}
+      protocol: TCP
+    {{- end }}
+    {{- end }}
+    - name: tcp-mysql
+      targetPort: tcp-mysql
+      port: {{ .Values.service.ports.mysql }}
+      protocol: TCP
+    - name: tcp-postgresql
+      targetPort: tcp-postgresql
+      port: {{ .Values.service.ports.postgresql }}
+      protocol: TCP
+    - name: http-intersrv
+      targetPort: http-intersrv
+      port: {{ .Values.service.ports.interserver }}
+      protocol: TCP
+    {{- if .Values.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+ 154 - 0
clickhouse/templates/service.yaml

@@ -0,0 +1,154 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.service.type }}
+  {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }}
+  clusterIP: {{ .Values.service.clusterIP }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.service.sessionAffinity }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinityConfig }}
+  sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }}
+  {{- end }}
+  {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }}
+  loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: http
+      targetPort: http
+      port: {{ .Values.service.ports.http }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.http)) }}
+      nodePort: {{ .Values.service.nodePorts.http }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if .Values.tls.enabled }}
+    - name: https
+      targetPort: https
+      port: {{ .Values.service.ports.https }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.https)) }}
+      nodePort: {{ .Values.service.nodePorts.https }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    - name: tcp
+      targetPort: tcp
+      port: {{ .Values.service.ports.tcp }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcp)) }}
+      nodePort: {{ .Values.service.nodePorts.tcp }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if .Values.tls.enabled }}
+    - name: tcp-secure
+      targetPort: tcp-secure
+      port: {{ .Values.service.ports.tcpSecure }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcpSecure)) }}
+      nodePort: {{ .Values.service.nodePorts.tcpSecure }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- if .Values.keeper.enabled }}
+    - name: tcp-keeper
+      targetPort: tcp-keeper
+      port: {{ .Values.service.ports.keeper }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.keeper)) }}
+      nodePort: {{ .Values.service.nodePorts.keeper }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    - name: tcp-keeperinter
+      targetPort: tcp-keeperinter
+      port: {{ .Values.service.ports.keeperInter }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.keeperInter)) }}
+      nodePort: {{ .Values.service.nodePorts.keeperInter }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if .Values.tls.enabled }}
+    - name: tcp-keepertls
+      targetPort: tcp-keepertls
+      port: {{ .Values.service.ports.keeperSecure }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.keeperSecure)) }}
+      nodePort: {{ .Values.service.nodePorts.keeperSecure }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- end }}
+    - name: tcp-mysql
+      targetPort: tcp-mysql
+      port: {{ .Values.service.ports.mysql }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.mysql)) }}
+      nodePort: {{ .Values.service.nodePorts.mysql }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    - name: tcp-postgresql
+      targetPort: tcp-postgresql
+      port: {{ .Values.service.ports.postgresql }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.postgresql)) }}
+      nodePort: {{ .Values.service.nodePorts.postgresql }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    - name: http-intersrv
+      targetPort: http-intersrv
+      port: {{ .Values.service.ports.interserver }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.interserver)) }}
+      nodePort: {{ .Values.service.nodePorts.interserver }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- if .Values.metrics.enabled }}
+    - name: http-metrics
+      targetPort: http-metrics
+      port: {{ .Values.service.ports.metrics }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }}
+      nodePort: {{ .Values.service.nodePorts.metrics }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- if .Values.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse

+ 49 - 0
clickhouse/templates/servicemonitor.yaml

@@ -0,0 +1,49 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+{{- /*
+Prometheus Operator ServiceMonitor scraping /metrics on the "http-metrics"
+service port; rendered only when both metrics.enabled and
+metrics.serviceMonitor.enabled are set.
+NOTE(review): "annotations:" is emitted even when both commonAnnotations and
+serviceMonitor.annotations are empty, yielding a null map -- harmless, but
+other templates in this chart guard the key; consider aligning.
+*/}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.metrics.serviceMonitor.labels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.metrics.serviceMonitor.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      {{- if .Values.metrics.serviceMonitor.selector }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+      {{- end }}
+  endpoints:
+    - port: http-metrics
+      path: "/metrics"
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.honorLabels }}
+      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.relabelings }}
+      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+    - {{ include "common.names.namespace" . | quote }}
+{{- end }}

+ 17 - 0
clickhouse/templates/start-scripts-secret.yaml

@@ -0,0 +1,17 @@
+{{- if and .Values.startdbScripts (not .Values.startdbScriptsSecret) }}
+{{- /*
+Secret exposing the user-provided start scripts (.Values.startdbScripts) to the
+ClickHouse pods. Rendered only when the user has not referenced an existing
+secret via .Values.startdbScriptsSecret. Script contents go through
+"common.tplvalues.render", so they may reference chart values.
+*/}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-start-scripts" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+stringData:
+{{- include "common.tplvalues.render" (dict "value" .Values.startdbScripts "context" .) | nindent 2 }}
+{{- end }}

+ 414 - 0
clickhouse/templates/statefulset.yaml

@@ -0,0 +1,414 @@
+{{- $shards := .Values.shards | int }}
+{{- range $i, $e := until $shards }}
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" $ }}
+kind: StatefulSet
+metadata:
+  name: {{ printf "%s-shard%d" (include "common.names.fullname" $ ) $i }}
+  namespace: {{ include "common.names.namespace" $ | quote }}
+  labels: {{- include "common.labels.standard" $ | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if $.Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if $.Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  replicas: {{ $.Values.replicaCount }}
+  podManagementPolicy: {{ $.Values.podManagementPolicy | quote }}
+  selector:
+    matchLabels: {{ include "common.labels.matchLabels" $ | nindent 6 }}
+      app.kubernetes.io/component: clickhouse
+  serviceName: {{ printf "%s-headless" (include "common.names.fullname" $) }}
+  {{- if $.Values.updateStrategy }}
+  updateStrategy: {{- toYaml $.Values.updateStrategy | nindent 4 }}
+  {{- end }}
+  template:
+    metadata:
+      annotations:
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") $ | sha256sum }}
+        checksum/config-extra: {{ include (print $.Template.BasePath "/configmap-extra.yaml") $ | sha256sum }}
+        {{- if $.Values.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" $.Values.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+        {{- if and $.Values.metrics.enabled $.Values.metrics.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" $.Values.metrics.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+      labels: {{- include "common.labels.standard" $ | nindent 8 }}
+        app.kubernetes.io/component: clickhouse
+        {{- if $.Values.podLabels }}
+        {{- include "common.tplvalues.render" (dict "value" $.Values.podLabels "context" $) | nindent 8 }}
+        {{- end }}
+    spec:
+      serviceAccountName: {{ template "clickhouse.serviceAccountName" $ }}
+      {{- include "clickhouse.imagePullSecrets" $ | nindent 6 }}
+      {{- if $.Values.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" $.Values.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if $.Values.affinity }}
+      affinity: {{- include "common.tplvalues.render" ( dict "value" $.Values.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.podAffinityPreset "component" "clickhouse" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.podAntiAffinityPreset "component" "clickhouse" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" $.Values.nodeAffinityPreset.type "key" $.Values.nodeAffinityPreset.key "values" $.Values.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if $.Values.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" ( dict "value" $.Values.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if $.Values.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if $.Values.priorityClassName }}
+      priorityClassName: {{ $.Values.priorityClassName | quote }}
+      {{- end }}
+      {{- if $.Values.schedulerName }}
+      schedulerName: {{ $.Values.schedulerName | quote }}
+      {{- end }}
+      {{- if $.Values.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" $.Values.topologySpreadConstraints "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if $.Values.podSecurityContext.enabled }}
+      securityContext: {{- omit $.Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      {{- if $.Values.terminationGracePeriodSeconds }}
+      terminationGracePeriodSeconds: {{ $.Values.terminationGracePeriodSeconds }}
+      {{- end }}
+      initContainers:
+        {{- if and $.Values.tls.enabled (not $.Values.volumePermissions.enabled) }}
+        - name: copy-certs
+          image: {{ include "clickhouse.volumePermissions.image" $ }}
+          imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }}
+          {{- if $.Values.resources }}
+          resources: {{- toYaml $.Values.resources | nindent 12 }}
+          {{- end }}
+          # We don't require a privileged container in this case
+          {{- if $.Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              cp -L /tmp/certs/* /opt/bitnami/clickhouse/certs/
+              chmod 600 {{ include "clickhouse.tlsCertKey" $ }}
+          volumeMounts:
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: clickhouse-certificates
+              mountPath: /opt/bitnami/clickhouse/certs
+        {{- else if and $.Values.volumePermissions.enabled $.Values.persistence.enabled }}
+        - name: volume-permissions
+          image: {{ include "clickhouse.volumePermissions.image" $ }}
+          imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              mkdir -p /bitnami/clickhouse/data
+              chmod 700 /bitnami/clickhouse/data
+              {{- if $.Values.keeper.enabled }}
+              mkdir -p /bitnami/clickhouse/keeper
+              chmod 700 /bitnami/clickhouse/keeper
+              {{- end }}
+              chown {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} /bitnami/clickhouse
+              find /bitnami/clickhouse -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \
+              xargs -r chown -R {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }}
+              {{- if $.Values.tls.enabled }}
+              cp /tmp/certs/* /opt/bitnami/clickhouse/certs/
+              {{- if eq ( toString ( $.Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }}
+              chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/clickhouse/certs/
+              {{- else }}
+              chown -R {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} /opt/bitnami/clickhouse/certs/
+              {{- end }}
+              chmod 600 {{ include "clickhouse.tlsCertKey" $ }}
+              {{- end }}
+          {{- if $.Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if $.Values.volumePermissions.resources }}
+          resources: {{- toYaml $.Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/clickhouse
+            - name: config
+              mountPath: /bitnami/clickhouse/conf/default
+            {{- if $.Values.tls.enabled }}
+            - name: raw-certificates
+              mountPath: /tmp/certs
+            - name: clickhouse-certificates
+              mountPath: /opt/bitnami/clickhouse/certs
+            {{- end }}
+        {{- end }}
+        {{- if $.Values.initContainers }}
+          {{- include "common.tplvalues.render" (dict "value" $.Values.initContainers "context" $) | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: clickhouse
+          image: {{ template "clickhouse.image" $ }}
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+          {{- if $.Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if $.Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if $.Values.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" $.Values.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if $.Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if $.Values.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" $.Values.args "context" $) | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or $.Values.image.debug $.Values.diagnosticMode.enabled) | quote }}
+            - name: CLICKHOUSE_HTTP_PORT
+              value: {{ $.Values.containerPorts.http | quote }}
+            - name: CLICKHOUSE_TCP_PORT
+              value: {{ $.Values.containerPorts.tcp | quote }}
+            - name: CLICKHOUSE_MYSQL_PORT
+              value: {{ $.Values.containerPorts.mysql | quote }}
+            - name: CLICKHOUSE_POSTGRESQL_PORT
+              value: {{ $.Values.containerPorts.postgresql | quote }}
+            - name: CLICKHOUSE_INTERSERVER_HTTP_PORT
+              value: {{ $.Values.containerPorts.interserver | quote }}
+            {{- if $.Values.tls.enabled }}
+            - name: CLICKHOUSE_TCP_SECURE_PORT
+              value: {{ $.Values.containerPorts.tcpSecure | quote }}
+            - name: CLICKHOUSE_HTTPS_PORT
+              value: {{ $.Values.containerPorts.https | quote }}
+            {{- end }}
+            {{- if $.Values.keeper.enabled }}
+            - name: CLICKHOUSE_KEEPER_PORT
+              value: {{ $.Values.containerPorts.keeper | quote }}
+            - name: CLICKHOUSE_KEEPER_INTER_PORT
+              value: {{ $.Values.containerPorts.keeperInter | quote }}
+            {{- if $.Values.tls.enabled }}
+            - name: CLICKHOUSE_KEEPER_SECURE_PORT
+              value: {{ $.Values.containerPorts.keeperSecure | quote }}
+            {{- end }}
+            {{- end }}
+            {{- if $.Values.metrics.enabled }}
+            - name: CLICKHOUSE_METRICS_PORT
+              value: {{ $.Values.containerPorts.metrics | quote }}
+            {{- end }}
+            - name: CLICKHOUSE_ADMIN_USER
+              value: {{ $.Values.auth.username | quote }}
+            - name: CLICKHOUSE_SHARD_ID
+              value: {{ printf "shard%d" $i | quote }}
+            - name: CLICKHOUSE_REPLICA_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: CLICKHOUSE_ADMIN_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "clickhouse.secretName" $ }}
+                  key: {{ include "clickhouse.secretKey" $ }}
+            {{- if $.Values.tls.enabled }}
+            - name: CLICKHOUSE_TLS_CERT_FILE
+              value: {{ include "clickhouse.tlsCert" $ | quote}}
+            - name: CLICKHOUSE_TLS_KEY_FILE
+              value: {{ include "clickhouse.tlsCertKey" $ | quote }}
+            - name: CLICKHOUSE_TLS_CA_FILE
+              value: {{ include "clickhouse.tlsCACert" $ | quote }}
+            {{- end }}
+            {{- if $.Values.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" $.Values.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+            {{- if $.Values.keeper.enabled }}
+            {{- $replicas := $.Values.replicaCount | int }}
+            {{- range $j, $r := until $replicas }}
+            - name: {{ printf "KEEPER_NODE_%d" $j }}
+              value: {{ printf "%s-shard%d-%d.%s.%s.svc.%s" (include "common.names.fullname" $ ) $i $j (include "clickhouse.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }}
+            {{- end }}
+            {{- else if $.Values.zookeeper.enabled }}
+            {{- $replicas := $.Values.zookeeper.replicaCount | int }}
+            {{- range $j, $r := until $replicas }}
+            - name: {{ printf "KEEPER_NODE_%d" $j }}
+              value: {{ printf "%s-%d.%s.%s.svc.%s" (include "clickhouse.zookeeper.fullname" $ ) $j (include "clickhouse.zookeeper.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }}
+            {{- end }}
+            {{- end }}
+          envFrom:
+            {{- if $.Values.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" (dict "value" $.Values.extraEnvVarsCM "context" $) }}
+            {{- end }}
+            {{- if $.Values.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" (dict "value" $.Values.extraEnvVarsSecret "context" $) }}
+            {{- end }}
+          {{- if $.Values.resources }}
+          resources: {{- toYaml $.Values.resources | nindent 12 }}
+          {{- end }}
+          ports:
+            - name: http
+              containerPort: {{ $.Values.containerPorts.http }}
+            - name: tcp
+              containerPort: {{ $.Values.containerPorts.tcp }}
+            {{- if $.Values.tls.enabled }}
+            - name: https
+              containerPort: {{ $.Values.containerPorts.https }}
+            - name: tcp-secure
+              containerPort: {{ $.Values.containerPorts.tcpSecure }}
+            {{- end }}
+            {{- if $.Values.keeper.enabled }}
+            - name: tcp-keeper
+              containerPort: {{ $.Values.containerPorts.keeper }}
+            - name: tcp-keeperinter
+              containerPort: {{ $.Values.containerPorts.keeperInter }}
+            {{- if $.Values.tls.enabled }}
+            - name: tcp-keepertls
+              containerPort: {{ $.Values.containerPorts.keeperSecure }}
+            {{- end }}
+            {{- end }}
+            - name: tcp-postgresql
+              containerPort: {{ $.Values.containerPorts.postgresql }}
+            - name: tcp-mysql
+              containerPort: {{ $.Values.containerPorts.mysql }}
+            - name: http-intersrv
+              containerPort: {{ $.Values.containerPorts.interserver }}
+            {{- if $.Values.metrics.enabled }}
+            - name: http-metrics
+              containerPort: {{ $.Values.containerPorts.metrics }}
+            {{- end }}
+          {{- if not $.Values.diagnosticMode.enabled }}
+          {{- if $.Values.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customLivenessProbe "context" $) | nindent 12 }}
+          {{- else if $.Values.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.livenessProbe "enabled") "context" $) | nindent 12 }}
+            httpGet:
+              path: /ping
+              port: http
+          {{- end }}
+          {{- if $.Values.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customReadinessProbe "context" $) | nindent 12 }}
+          {{- else if $.Values.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.readinessProbe "enabled") "context" $) | nindent 12 }}
+            httpGet:
+              path: /ping
+              port: http
+          {{- end }}
+          {{- if $.Values.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customStartupProbe "context" $) | nindent 12 }}
+          {{- else if $.Values.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.startupProbe "enabled") "context" $) | nindent 12 }}
+            httpGet:
+              path: /ping
+              port: http
+          {{- end }}
+          {{- end }}
+          {{- if $.Values.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" $.Values.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: scripts
+              mountPath: /scripts/setup.sh
+              subPath: setup.sh
+            - name: data
+              mountPath: /bitnami/clickhouse
+            - name: config
+              mountPath: /bitnami/clickhouse/etc/conf.d/default
+          {{- if or $.Values.extraOverridesConfigmap $.Values.extraOverrides }}
+            - name: extra-config
+              mountPath: /bitnami/clickhouse/etc/conf.d/extra-configmap
+          {{- end }}
+          {{- if $.Values.extraOverridesSecret }}
+            - name: extra-secret
+              mountPath: /bitnami/clickhouse/etc/conf.d/extra-secret
+          {{- end }}
+          {{- if $.Values.tls.enabled }}
+            - name: clickhouse-certificates
+              mountPath: /bitnami/clickhouse/certs
+          {{- end }}
+          {{- if or $.Values.initdbScriptsSecret $.Values.initdbScripts }}
+            - name: custom-init-scripts
+              mountPath: /docker-entrypoint-initdb.d
+          {{- end }}
+          {{- if or $.Values.startdbScriptsSecret $.Values.startdbScripts }}
+            - name: custom-start-scripts
+              mountPath: /docker-entrypoint-startdb.d
+          {{- end }}
+          {{- if $.Values.extraVolumeMounts }}
+          {{- include "common.tplvalues.render" (dict "value" $.Values.extraVolumeMounts "context" $) | nindent 12 }}
+          {{- end }}
+        {{- if $.Values.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" $.Values.sidecars "context" $) | nindent 8 }}
+        {{- end }}
+      volumes:
+        - name: scripts
+          configMap:
+            name: {{ printf "%s-scripts" (include "common.names.fullname" $) }}
+            defaultMode: 0755
+        - name: config
+          configMap:
+            name: {{ template "clickhouse.configmapName" $ }}
+        {{- if or $.Values.initdbScriptsSecret $.Values.initdbScripts }}
+        - name: custom-init-scripts
+          secret:
+            secretName: {{ include "clickhouse.initdbScriptsSecret" $ }}
+        {{- end }}
+        {{- if or $.Values.startdbScriptsSecret $.Values.startdbScripts }}
+        - name: custom-start-scripts
+          secret:
+            secretName: {{ include "clickhouse.startdbScriptsSecret" $ }}
+        {{- end }}
+        {{- if or $.Values.extraOverridesConfigmap $.Values.extraOverrides }}
+        - name: extra-config
+          configMap:
+            name: {{ template "clickhouse.extraConfigmapName" $ }}
+        {{- end }}
+        {{- if $.Values.extraOverridesSecret }}
+        - name: extra-secret
+          secret:
+            secretName: {{ $.Values.extraOverridesSecret }}
+        {{- end }}
+        {{- if not $.Values.persistence.enabled }}
+        - name: data
+          emptyDir: {}
+        {{- end }}
+        {{- if $.Values.tls.enabled }}
+        - name: raw-certificates
+          secret:
+            secretName: {{ include "clickhouse.tlsSecretName" $ }}
+        - name: clickhouse-certificates
+          emptyDir: {}
+        {{- end }}
+        {{- if $.Values.extraVolumes }}
+        {{- include "common.tplvalues.render" (dict "value" $.Values.extraVolumes "context" $) | nindent 8 }}
+        {{- end }}
+  {{- if $.Values.persistence.enabled }}
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+        annotations:
+          {{- if $.Values.persistence.annotations }}
+          {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.annotations "context" $) | nindent 10 }}
+          {{- end }}
+          {{- if $.Values.commonAnnotations }}
+          {{- include "common.tplvalues.render" (dict "value" $.Values.commonAnnotations "context" $) | nindent 10 }}
+          {{- end }}
+        labels:
+          {{- if $.Values.commonLabels }}
+          {{- include "common.tplvalues.render" (dict "value" $.Values.commonLabels "context" $) | nindent 10 }}
+          {{- end }}
+          {{- if $.Values.persistence.labels }}
+          {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.labels "context" $) | nindent 10 }}
+          {{- end }}
+      spec:
+        accessModes:
+        {{- range $.Values.persistence.accessModes }}
+          - {{ . | quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ $.Values.persistence.size | quote }}
+        {{- if $.Values.persistence.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.selector "context" $) | nindent 10 }}
+        {{- end }}
+        {{- if $.Values.persistence.dataSource }}
+        dataSource: {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.dataSource "context" $) | nindent 10 }}
+        {{- end }}
+        {{- include "common.storage.class" (dict "persistence" $.Values.persistence "global" $.Values.global) | nindent 8 }}
+  {{- end }}
+---
+{{- end }}

+ 27 - 0
clickhouse/templates/tls-secret.yaml

@@ -0,0 +1,27 @@
+{{- if (include "clickhouse.createTlsSecret" . ) }}
+{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }}
+{{- $ca := genCA "clickhouse-ca" 365 }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $primaryHeadlessServiceName := printf "%s-headless" (include "common.names.fullname" .)}}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }}
+{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}

+ 1070 - 0
clickhouse/values.yaml

@@ -0,0 +1,1070 @@
+## @section Global parameters
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+  imageRegistry: ""
+  ## E.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: "openebs-hostpath"
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.name
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname
+##
+fullnameOverride: ""
+## @param namespaceOverride String to fully override common.names.namespace
+##
+namespaceOverride: ""
+## @param commonLabels Labels to add to all deployed objects
+##
+commonLabels: {}
+## @param commonAnnotations Annotations to add to all deployed objects
+##
+commonAnnotations: {}
+## @param clusterDomain Kubernetes cluster domain name
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
+
+## Enable diagnostic mode in the deployment
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the deployment
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the deployment
+  ##
+  args:
+    - infinity
+
+## @section ClickHouse Parameters
+##
+
+## Bitnami ClickHouse image
+## ref: https://hub.docker.com/r/bitnami/clickhouse/tags/
+## @param image.registry ClickHouse image registry
+## @param image.repository ClickHouse image repository
+## @param image.tag ClickHouse image tag (immutable tags are recommended)
+## @param image.digest ClickHouse image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy ClickHouse image pull policy
+## @param image.pullSecrets ClickHouse image pull secrets
+## @param image.debug Enable ClickHouse image debug mode
+##
+image:
+  registry: docker.io
+  repository: bitnami/clickhouse
+  tag: 23.3.1-debian-11-r0
+  digest: ""
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## e.g:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Enable debug mode
+  ##
+  debug: false
+## @param shards Number of ClickHouse shards to deploy
+##
+shards: 1
+
+## @param replicaCount Number of ClickHouse replicas per shard to deploy
+## if keeper enable, same as keeper count, keeper cluster by shards.
+##
+replicaCount: 1
+## @param containerPorts.http ClickHouse HTTP container port
+## @param containerPorts.https ClickHouse HTTPS container port
+## @param containerPorts.tcp ClickHouse TCP container port
+## @param containerPorts.tcpSecure ClickHouse TCP (secure) container port
+## @param containerPorts.keeper ClickHouse keeper TCP container port
+## @param containerPorts.keeperSecure ClickHouse keeper TCP (secure) container port
+## @param containerPorts.keeperInter ClickHouse keeper interserver TCP container port
+## @param containerPorts.mysql ClickHouse MySQL container port
+## @param containerPorts.postgresql ClickHouse PostgreSQL container port
+## @param containerPorts.interserver ClickHouse Interserver container port
+## @param containerPorts.metrics ClickHouse metrics container port
+##
+containerPorts:
+  http: 8123
+  https: 8443
+  tcp: 9000
+  tcpSecure: 9440
+  keeper: 2181
+  keeperSecure: 3181
+  keeperInter: 9444
+  mysql: 9004
+  postgresql: 9005
+  interserver: 9009
+  metrics: 8001
+## Configure extra options for ClickHouse containers' liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+## @param livenessProbe.enabled Enable livenessProbe on ClickHouse containers
+## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+## @param livenessProbe.periodSeconds Period seconds for livenessProbe
+## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
+## @param livenessProbe.successThreshold Success threshold for livenessProbe
+##
+livenessProbe:
+  enabled: true
+  failureThreshold: 3
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 1
+  timeoutSeconds: 1
+## @param readinessProbe.enabled Enable readinessProbe on ClickHouse containers
+## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+## @param readinessProbe.periodSeconds Period seconds for readinessProbe
+## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
+## @param readinessProbe.successThreshold Success threshold for readinessProbe
+##
+readinessProbe:
+  enabled: true
+  failureThreshold: 3
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 1
+  timeoutSeconds: 1
+## @param startupProbe.enabled Enable startupProbe on ClickHouse containers
+## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+## @param startupProbe.periodSeconds Period seconds for startupProbe
+## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
+## @param startupProbe.failureThreshold Failure threshold for startupProbe
+## @param startupProbe.successThreshold Success threshold for startupProbe
+##
+startupProbe:
+  enabled: false
+  failureThreshold: 3
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 1
+  timeoutSeconds: 1
+## @param customLivenessProbe Custom livenessProbe that overrides the default one
+##
+customLivenessProbe: {}
+## @param customReadinessProbe Custom readinessProbe that overrides the default one
+##
+customReadinessProbe: {}
+## @param customStartupProbe Custom startupProbe that overrides the default one
+##
+customStartupProbe: {}
+## ClickHouse resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resources limits for the ClickHouse containers
+## @param resources.requests The requested resources for the ClickHouse containers
+##
+resources:
+  limits: {}
+  requests: {}
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enabled ClickHouse pods' Security Context
+## @param podSecurityContext.fsGroup Set ClickHouse pod's Security Context fsGroup
+## @param podSecurityContext.seccompProfile.type Set ClickHouse container's Security Context seccomp profile
+## If you are using Kubernetes 1.18, the following code needs to be commented out.
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+  seccompProfile:
+    type: "RuntimeDefault"
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enabled ClickHouse containers' Security Context
+## @param containerSecurityContext.runAsUser Set ClickHouse containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set ClickHouse containers' Security Context runAsNonRoot
+## @param containerSecurityContext.allowPrivilegeEscalation Set ClickHouse container's privilege escalation
+## @param containerSecurityContext.capabilities.drop Set ClickHouse container's Security Context runAsNonRoot
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  allowPrivilegeEscalation: false
+  capabilities:
+    drop: ["ALL"]
+
+## Authentication
+## @param auth.username ClickHouse Admin username
+## @param auth.password ClickHouse Admin password (NOTE: prefer `auth.existingSecret` over a plaintext password committed to version control)
+## @param auth.existingSecret Name of a secret containing the Admin password
+## @param auth.existingSecretKey Name of the key inside the existing secret
+##
+auth:
+  username: default
+  password: "cecf@cestong.com"
+  existingSecret: ""
+  existingSecretKey: ""
+
+## @param logLevel Logging level
+##
+logLevel: information
+
+## @section ClickHouse keeper configuration parameters
+## @param keeper.enabled Deploy ClickHouse keeper. Support is experimental.
+##
+keeper:
+  enabled: false
+
+## @param defaultConfigurationOverrides [string] Default configuration overrides (evaluated as a template)
+##
+defaultConfigurationOverrides: |
+  <clickhouse>
+    <!-- Macros -->
+    <macros>
+      <shard from_env="CLICKHOUSE_SHARD_ID"></shard>
+      <replica from_env="CLICKHOUSE_REPLICA_ID"></replica>
+      <layer>{{ include "common.names.fullname" . }}</layer>
+    </macros>
+    <!-- Log Level -->
+    <logger>
+      <level>{{ .Values.logLevel }}</level>
+    </logger>
+    {{- if or (ne (int .Values.shards) 1) (ne (int .Values.replicaCount) 1)}}
+    <!-- Cluster configuration - Any update of the shards and replicas requires helm upgrade -->
+    <remote_servers>
+      <default>
+        {{- $shards := $.Values.shards | int }}
+        {{- range $shard, $e := until $shards }}
+        <shard>
+            {{- $replicas := $.Values.replicaCount | int }}
+            {{- range $i, $_e := until $replicas }}
+            <replica>
+                <host>{{ printf "%s-shard%d-%d.%s.%s.svc.%s" (include "common.names.fullname" $ ) $shard $i (include "clickhouse.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }}</host>
+                <port>{{ $.Values.service.ports.tcp }}</port>
+            </replica>
+            {{- end }}
+        </shard>
+        {{- end }}
+      </default>
+    </remote_servers>
+    {{- end }}
+    {{- if .Values.keeper.enabled }}
+    <!-- keeper configuration -->
+    <keeper_server>
+      {{/*ClickHouse keeper configuration using the helm chart */}}
+      <tcp_port>{{ $.Values.containerPorts.keeper }}</tcp_port>
+      {{- if .Values.tls.enabled }}
+      <tcp_port_secure>{{ $.Values.containerPorts.keeperSecure }}</tcp_port_secure>
+      {{- end }}
+      <server_id from_env="KEEPER_SERVER_ID"></server_id>
+      <log_storage_path>/bitnami/clickhouse/keeper/coordination/log</log_storage_path>
+      <snapshot_storage_path>/bitnami/clickhouse/keeper/coordination/snapshots</snapshot_storage_path>
+
+      <coordination_settings>
+          <operation_timeout_ms>10000</operation_timeout_ms>
+          <session_timeout_ms>30000</session_timeout_ms>
+          <raft_logs_level>trace</raft_logs_level>
+      </coordination_settings>
+
+      <raft_configuration>
+      {{- $nodes := .Values.replicaCount | int }}
+      {{- range $node, $e := until $nodes }}
+      <server>
+        <id>{{ $node | int }}</id>
+        <hostname from_env="{{ printf "KEEPER_NODE_%d" $node }}"></hostname>
+        <port>{{ $.Values.service.ports.keeperInter }}</port>
+      </server>
+      {{- end }}
+      </raft_configuration>
+    </keeper_server>
+    {{- end }}
+    {{- if or .Values.keeper.enabled .Values.zookeeper.enabled .Values.externalZookeeper.servers }}
+    <!-- Zookeeper configuration -->
+    <zookeeper>
+      {{- if .Values.keeper.enabled }}
+      {{- $nodes := .Values.replicaCount | int }}
+      {{- range $node, $e := until $nodes }}
+      <node>
+        <host from_env="{{ printf "KEEPER_NODE_%d" $node }}"></host>
+        <port>{{ $.Values.service.ports.keeper }}</port>
+      </node>
+      {{- end }}
+      {{- else if .Values.zookeeper.enabled }}
+      {{/* Zookeeper configuration using the helm chart */}}
+      {{- $nodes := .Values.zookeeper.replicaCount | int }}
+      {{- range $node, $e := until $nodes }}
+      <node>
+        <host from_env="{{ printf "KEEPER_NODE_%d" $node }}"></host>
+        <port>{{ $.Values.zookeeper.service.ports.client }}</port>
+      </node>
+      {{- end }}
+      {{- else if .Values.externalZookeeper.servers }}
+      {{/* Zookeeper configuration using an external instance */}}
+      {{- range $node := .Values.externalZookeeper.servers }}
+      <node>
+        <host>{{ $node }}</host>
+        <port>{{ $.Values.externalZookeeper.port }}</port>
+      </node>
+      {{- end }}
+      {{- end }}
+    </zookeeper>
+    {{- end }}
+    {{- if .Values.tls.enabled }}
+    <!-- TLS configuration -->
+    <tcp_port_secure from_env="CLICKHOUSE_TCP_SECURE_PORT"></tcp_port_secure>
+    <https_port from_env="CLICKHOUSE_HTTPS_PORT"></https_port>
+    <openSSL>
+        <server>
+            {{- $certFileName := default "tls.crt" .Values.tls.certFilename }}
+            {{- $keyFileName := default "tls.key" .Values.tls.certKeyFilename }}
+            <certificateFile>/bitnami/clickhouse/certs/{{$certFileName}}</certificateFile>
+            <privateKeyFile>/bitnami/clickhouse/certs/{{$keyFileName}}</privateKeyFile>
+            <verificationMode>none</verificationMode>
+            <cacheSessions>true</cacheSessions>
+            <disableProtocols>sslv2,sslv3</disableProtocols>
+            <preferServerCiphers>true</preferServerCiphers>
+            {{- if or .Values.tls.autoGenerated .Values.tls.certCAFilename }}
+            {{- $caFileName := default "ca.crt" .Values.tls.certCAFilename }}
+            <caConfig>/bitnami/clickhouse/certs/{{$caFileName}}</caConfig>
+            {{- else }}
+            <loadDefaultCAFile>true</loadDefaultCAFile>
+            {{- end }}
+        </server>
+        <client>
+            <loadDefaultCAFile>true</loadDefaultCAFile>
+            <cacheSessions>true</cacheSessions>
+            <disableProtocols>sslv2,sslv3</disableProtocols>
+            <preferServerCiphers>true</preferServerCiphers>
+            <verificationMode>none</verificationMode>
+            <invalidCertificateHandler>
+                <name>AcceptCertificateHandler</name>
+            </invalidCertificateHandler>
+        </client>
+    </openSSL>
+    {{- end }}
+    {{- if .Values.metrics.enabled }}
+     <!-- Prometheus metrics -->
+     <prometheus>
+        <endpoint>/metrics</endpoint>
+        <port from_env="CLICKHOUSE_METRICS_PORT"></port>
+        <metrics>true</metrics>
+        <events>true</events>
+        <asynchronous_metrics>true</asynchronous_metrics>
+    </prometheus>
+    {{- end }}
+  </clickhouse>
+
+## @param existingOverridesConfigmap The name of an existing ConfigMap with your custom configuration for ClickHouse
+##
+existingOverridesConfigmap: ""
+
+## @param extraOverrides Extra configuration overrides (evaluated as a template) apart from the default
+##
+extraOverrides: ""
+
+## @param extraOverridesConfigmap The name of an existing ConfigMap with extra configuration for ClickHouse
+##
+extraOverridesConfigmap: ""
+
+## @param extraOverridesSecret The name of an existing Secret with your custom configuration for ClickHouse
+##
+extraOverridesSecret: ""
+
+## @param initdbScripts Dictionary of initdb scripts
+## Specify dictionary of scripts to be run at first boot
+## Example:
+## initdbScripts:
+##   my_init_script.sh: |
+##      #!/bin/bash
+##      echo "Do something."
+##
+initdbScripts: {}
+## @param initdbScriptsSecret Secret with the initdb scripts (Note: Overrides `initdbScripts`)
+##
+initdbScriptsSecret: ""
+
+## @param startdbScripts Dictionary of startdb scripts
+## Specify dictionary of scripts to be run on every start
+## Example:
+## startdbScripts:
+##   my_start_script.sh: |
+##      #!/bin/bash
+##      echo "Do something."
+##
+startdbScripts: {}
+## @param startdbScriptsSecret Secret with the startdb scripts (Note: Overrides `startdbScripts`)
+##
+startdbScriptsSecret: ""
+
+## @param command Override default container command (useful when using custom images)
+##
+command:
+    - /scripts/setup.sh
+## @param args Override default container args (useful when using custom images)
+##
+args: []
+## @param hostAliases ClickHouse pods host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param podLabels Extra labels for ClickHouse pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Annotations for ClickHouse pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ##
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
+  ##
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
+## @param affinity Affinity for ClickHouse pods assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## NOTE: `podAffinityPreset`, `podAntiAffinityPreset`, and `nodeAffinityPreset` will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for ClickHouse pods assignment
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for ClickHouse pods assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param updateStrategy.type ClickHouse statefulset strategy type
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  ## StrategyType
+  ## Can be set to RollingUpdate or OnDelete
+  ##
+  type: RollingUpdate
+
+## @param podManagementPolicy Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join
+## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+##
+podManagementPolicy: Parallel
+
+## @param priorityClassName ClickHouse pods' priorityClassName
+##
+priorityClassName: ""
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+## @param schedulerName Name of the k8s scheduler (other than default) for ClickHouse pods
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param terminationGracePeriodSeconds Seconds ClickHouse pod needs to terminate gracefully
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+##
+terminationGracePeriodSeconds: ""
+## @param lifecycleHooks for the ClickHouse container(s) to automate configuration before or after startup
+##
+lifecycleHooks: {}
+## @param extraEnvVars Array with extra environment variables to add to ClickHouse nodes
+## e.g:
+## extraEnvVars:
+##   - name: FOO
+##     value: "bar"
+##
+extraEnvVars: []
+## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ClickHouse nodes
+##
+extraEnvVarsCM: ""
+## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ClickHouse nodes
+##
+extraEnvVarsSecret: ""
+## @param extraVolumes Optionally specify extra list of additional volumes for the ClickHouse pod(s)
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ClickHouse container(s)
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the ClickHouse pod(s)
+## e.g:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the ClickHouse pod(s)
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+## e.g:
+## initContainers:
+##  - name: your-image-name
+##    image: your-image
+##    imagePullPolicy: Always
+##    command: ['sh', '-c', 'echo "hello world"']
+##
+initContainers: []
+
+## TLS configuration
+##
+tls:
+  ## @param tls.enabled Enable TLS traffic support
+  ##
+  enabled: false
+  ## @param tls.autoGenerated Generate automatically self-signed TLS certificates
+  ##
+  autoGenerated: false
+  ## @param tls.certificatesSecret Name of an existing secret that contains the certificates
+  ##
+  certificatesSecret: ""
+  ## @param tls.certFilename Certificate filename
+  ##
+  certFilename: ""
+  ## @param tls.certKeyFilename Certificate key filename
+  ##
+  certKeyFilename: ""
+  ## @param tls.certCAFilename CA Certificate filename
+  ## If provided, ClickHouse will authenticate TLS/SSL clients by requesting them a certificate
+  ##
+  certCAFilename: ""
+
+## @section Traffic Exposure Parameters
+##
+
+## ClickHouse service parameters
+##
+service:
+  ## @param service.type ClickHouse service type
+  ##
+  type: ClusterIP
+  ## @param service.ports.http ClickHouse service HTTP port
+  ## @param service.ports.https ClickHouse service HTTPS port
+  ## @param service.ports.tcp ClickHouse service TCP port
+  ## @param service.ports.tcpSecure ClickHouse service TCP (secure) port
+  ## @param service.ports.keeper ClickHouse keeper TCP container port
+  ## @param service.ports.keeperSecure ClickHouse keeper TCP (secure) container port
+  ## @param service.ports.keeperInter ClickHouse keeper interserver TCP container port
+  ## @param service.ports.mysql ClickHouse service MySQL port
+  ## @param service.ports.postgresql ClickHouse service PostgreSQL port
+  ## @param service.ports.interserver ClickHouse service Interserver port
+  ## @param service.ports.metrics ClickHouse service metrics port
+  ##
+  ports:
+    http: 8123
+    https: 443
+    tcp: 9000
+    tcpSecure: 9440
+    keeper: 2181
+    keeperSecure: 3181
+    keeperInter: 9444
+    mysql: 9004
+    postgresql: 9005
+    interserver: 9009
+    metrics: 8001
+  ## Node ports to expose
+  ## @param service.nodePorts.http Node port for HTTP
+  ## @param service.nodePorts.https Node port for HTTPS
+  ## @param service.nodePorts.tcp Node port for TCP
+  ## @param service.nodePorts.tcpSecure Node port for TCP (with TLS)
+  ## @param service.nodePorts.keeper Node port for ClickHouse keeper TCP
+  ## @param service.nodePorts.keeperSecure Node port for ClickHouse keeper TCP (with TLS)
+  ## @param service.nodePorts.keeperInter Node port for ClickHouse keeper interserver TCP
+  ## @param service.nodePorts.mysql Node port for MySQL
+  ## @param service.nodePorts.postgresql Node port for PostgreSQL
+  ## @param service.nodePorts.interserver Node port for Interserver
+  ## @param service.nodePorts.metrics Node port for metrics
+  ## NOTE: choose port between <30000-32767>
+  ##
+  nodePorts:
+    http: ""
+    https: ""
+    tcp: ""
+    tcpSecure: ""
+    keeper: ""
+    keeperSecure: ""
+    keeperInter: ""
+    mysql: ""
+    postgresql: ""
+    interserver: ""
+    metrics: ""
+  ## @param service.clusterIP ClickHouse service Cluster IP
+  ## e.g.:
+  ## clusterIP: None
+  ##
+  clusterIP: ""
+  ## @param service.loadBalancerIP ClickHouse service Load Balancer IP
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+  ##
+  loadBalancerIP: ""
+  ## @param service.loadBalancerSourceRanges ClickHouse service Load Balancer sources
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ## e.g:
+  ## loadBalancerSourceRanges:
+  ##   - 10.10.10.0/24
+  ##
+  loadBalancerSourceRanges: []
+  ## @param service.externalTrafficPolicy ClickHouse service external traffic policy
+  ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## @param service.annotations Additional custom annotations for ClickHouse service
+  ##
+  annotations: {}
+  ## @param service.extraPorts Extra ports to expose in ClickHouse service (normally used with the `sidecars` value)
+  ##
+  extraPorts: []
+  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
+  ## Values: ClientIP or None
+  ## ref: https://kubernetes.io/docs/user-guide/services/
+  ##
+  sessionAffinity: None
+  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
+  ## sessionAffinityConfig:
+  ##   clientIP:
+  ##     timeoutSeconds: 300
+  ##
+  sessionAffinityConfig: {}
+  ## Headless service properties
+  ##
+  headless:
+    ## @param service.headless.annotations Annotations for the headless service.
+    ##
+    annotations: {}
+
+## External Access to ClickHouse  configuration
+##
+externalAccess:
+  ## @param externalAccess.enabled Enable Kubernetes external cluster access to ClickHouse
+  ##
+  enabled: false
+  ## Parameters to configure K8s service(s) used to externally access ClickHouse
+## Note: A new service per ClickHouse pod will be created
+  ##
+  service:
+    ## @param externalAccess.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP
+    ##
+    type: LoadBalancer
+    ## @param externalAccess.service.ports.http ClickHouse service HTTP port
+    ## @param externalAccess.service.ports.https ClickHouse service HTTPS port
+    ## @param externalAccess.service.ports.tcp ClickHouse service TCP port
+    ## @param externalAccess.service.ports.tcpSecure ClickHouse service TCP (secure) port
+    ## @param externalAccess.service.ports.keeper ClickHouse keeper TCP container port
+    ## @param externalAccess.service.ports.keeperSecure ClickHouse keeper TCP (secure) container port
+    ## @param externalAccess.service.ports.keeperInter ClickHouse keeper interserver TCP container port
+    ## @param externalAccess.service.ports.mysql ClickHouse service MySQL port
+    ## @param externalAccess.service.ports.postgresql ClickHouse service PostgreSQL port
+    ## @param externalAccess.service.ports.interserver ClickHouse service Interserver port
+    ## @param externalAccess.service.ports.metrics ClickHouse service metrics port
+    ##
+    ports:
+      http: 80
+      https: 443
+      tcp: 9000
+      tcpSecure: 9440
+      keeper: 2181
+      keeperSecure: 3181
+      keeperInter: 9444
+      mysql: 9004
+      postgresql: 9005
+      interserver: 9009
+      metrics: 8001
+    ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each ClickHouse pod. Length must be the same as replicaCount
+    ## e.g:
+    ## loadBalancerIPs:
+    ##   - X.X.X.X
+    ##   - Y.Y.Y.Y
+    ##
+    loadBalancerIPs: []
+    ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each ClickHouse pod. Length must be the same as replicaCount
+    ## e.g:
+    ## loadBalancerAnnotations:
+    ##   - external-dns.alpha.kubernetes.io/hostname: 1.external.example.com.
+    ##   - external-dns.alpha.kubernetes.io/hostname: 2.external.example.com.
+    ##
+    loadBalancerAnnotations: []
+    ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer
+    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+    ## e.g:
+    ## loadBalancerSourceRanges:
+    ## - 10.10.10.0/24
+    ##
+    loadBalancerSourceRanges: []
+    ## @param externalAccess.service.nodePorts.http Node port for HTTP
+    ## @param externalAccess.service.nodePorts.https Node port for HTTPS
+    ## @param externalAccess.service.nodePorts.tcp Node port for TCP
+    ## @param externalAccess.service.nodePorts.tcpSecure Node port for TCP (with TLS)
+    ## @param externalAccess.service.nodePorts.keeper Node port for ClickHouse keeper TCP
+    ## @param externalAccess.service.nodePorts.keeperSecure Node port for ClickHouse keeper TCP (with TLS)
+    ## @param externalAccess.service.nodePorts.keeperInter Node port for ClickHouse keeper interserver TCP
+    ## @param externalAccess.service.nodePorts.mysql Node port for MySQL
+    ## @param externalAccess.service.nodePorts.postgresql Node port for PostgreSQL
+    ## @param externalAccess.service.nodePorts.interserver Node port for Interserver
+    ## @param externalAccess.service.nodePorts.metrics Node port for metrics
+    ## NOTE: choose port between <30000-32767>
+    ## e.g:
+    ## nodePorts:
+    ##   tls:
+    ##   - 30001
+    ##   - 30002
+    ##
+    nodePorts:
+      http: []
+      https: []
+      tcp: []
+      tcpSecure: []
+      keeper: []
+      keeperSecure: []
+      keeperInter: []
+      mysql: []
+      postgresql: []
+      interserver: []
+      metrics: []
+    ## @param externalAccess.service.labels Service labels for external access
+    ##
+    labels: {}
+    ## @param externalAccess.service.annotations Service annotations for external access
+    ##
+    annotations: {}
+    ## @param externalAccess.service.extraPorts Extra ports to expose in the ClickHouse external service
+    ##
+    extraPorts: []
+
+## ClickHouse ingress parameters
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## @param ingress.enabled Enable ingress record generation for ClickHouse
+  ##
+  enabled: true
+  ## @param ingress.pathType Ingress path type
+  ##
+  pathType: ImplementationSpecific
+  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
+  ##
+  apiVersion: ""
+  ## @param ingress.hostname Default host for the ingress record
+  ##
+  hostname: clickhouse.cecf.base
+  ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+)
+  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
+  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+  ##
+  ingressClassName: "nginx"
+  ## @param ingress.path Default path for the ingress record
+  ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
+  ##
+  path: /
+  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+  ## Use this parameter to set the required annotations for cert-manager, see
+  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+  ## e.g:
+  ## annotations:
+  ##   kubernetes.io/ingress.class: nginx
+  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
+  ##
+  annotations: {}
+  ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
+  ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
+  ## You can:
+  ##   - Use the `ingress.secrets` parameter to create this TLS secret
+  ##   - Rely on cert-manager to create it by setting the corresponding annotations
+  ##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+  ##
+  tls: false
+  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+  ##
+  selfSigned: false
+  ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
+  ## e.g:
+  ## extraHosts:
+  ##   - name: clickhouse.local
+  ##     path: /
+  ##
+  extraHosts: []
+  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
+  ## e.g:
+  ## extraPaths:
+  ## - path: /*
+  ##   backend:
+  ##     serviceName: ssl-redirect
+  ##     servicePort: use-annotation
+  ##
+  extraPaths: []
+  ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+  ## e.g:
+  ## extraTls:
+  ## - hosts:
+  ##     - clickhouse.local
+  ##   secretName: clickhouse.local-tls
+  ##
+  extraTls: []
+  ## @param ingress.secrets Custom TLS certificates as secrets
+  ## NOTE: 'key' and 'certificate' are expected in PEM format
+  ## NOTE: 'name' should line up with a 'secretName' set further up
+  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
+  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
+  ## It is also possible to create and manage the certificates outside of this helm chart
+  ## Please see README.md for more information
+  ## e.g:
+  ## secrets:
+  ##   - name: clickhouse.local-tls
+  ##     key: |-
+  ##       -----BEGIN RSA PRIVATE KEY-----
+  ##       ...
+  ##       -----END RSA PRIVATE KEY-----
+  ##     certificate: |-
+  ##       -----BEGIN CERTIFICATE-----
+  ##       ...
+  ##       -----END CERTIFICATE-----
+  ##
+  secrets: []
+  ## @param ingress.extraRules Additional rules to be covered with this ingress record
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
+  ## e.g:
+  ## extraRules:
+  ## - host: example.local
+  ##     http:
+  ##       path: /
+  ##       backend:
+  ##         service:
+  ##           name: example-svc
+  ##           port:
+  ##             name: http
+  ##
+  extraRules: []
+
+## @section Persistence Parameters
+##
+
+## Enable persistence using Persistent Volume Claims
+## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  ## @param persistence.enabled Enable persistence using Persistent Volume Claims
+  ##
+  enabled: true
+  ## @param persistence.storageClass Storage class of backing PVC
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  storageClass: "openebs-hostpath"
+  ## @param persistence.labels Persistent Volume Claim labels
+  ##
+  labels: {}
+  ## @param persistence.annotations Persistent Volume Claim annotations
+  ##
+  annotations: {}
+  ## @param persistence.accessModes Persistent Volume Access Modes
+  ##
+  accessModes:
+    - ReadWriteOnce
+  ## @param persistence.size Size of data volume
+  ##
+  size: 8Gi
+  ## @param persistence.selector Selector to match an existing Persistent Volume for WordPress data PVC
+  ## If set, the PVC can't have a PV dynamically provisioned for it
+  ## E.g.
+  ## selector:
+  ##   matchLabels:
+  ##     app: my-app
+  ##
+  selector: {}
+  ## @param persistence.dataSource Custom PVC data source
+  ##
+  dataSource: {}
+## @section Init Container Parameters
+##
+
+## 'volumePermissions' init container parameters
+## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
+##   based on the *podSecurityContext/*containerSecurityContext parameters
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
+  ##
+  enabled: false
+  ## Bitnami Shell image
+  ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/
+  ## @param volumePermissions.image.registry Bitnami Shell image registry
+  ## @param volumePermissions.image.repository Bitnami Shell image repository
+  ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy
+  ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 11-debian-11-r101
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits The resources limits for the init container
+  ## @param volumePermissions.resources.requests The requested resources for the init container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+  ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser
+  ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
+  ##   data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+  ##   "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## @section Other Parameters
+##
+
+## ServiceAccount configuration
+##
+serviceAccount:
+  ## @param serviceAccount.create Specifies whether a ServiceAccount should be created
+  ##
+  create: true
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the common.names.fullname template
+  ##
+  name: ""
+  ## @param serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
+  ##
+  annotations: {}
+  ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
+  ##
+  automountServiceAccountToken: true
+
+## Prometheus metrics
+##
+metrics:
+  ## @param metrics.enabled Enable the export of Prometheus metrics
+  ##
+  enabled: false
+  ## @param metrics.podAnnotations [object] Annotations for metrics scraping
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "{{ .Values.containerPorts.metrics }}"
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor
+    ##
+    annotations: {}
+    ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
+    ##
+    labels: {}
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus
+    ##
+    jobLabel: ""
+    ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ## e.g:
+    ## interval: 10s
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ## e.g:
+    ## scrapeTimeout: 10s
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.relabelings Specify general relabeling
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+    ## selector:
+    ##   prometheus: my-prometheus
+    ##
+    selector: {}
+
+## @section External Zookeeper parameters
+##
+externalZookeeper:
+  ## @param externalZookeeper.servers List of external zookeeper servers to use
+  ## @param externalZookeeper.port Port of the Zookeeper servers
+  ##
+  servers: []
+  port: 2888
+
+## @section Zookeeper subchart parameters
+##
+## @param zookeeper.enabled Deploy Zookeeper subchart
+## @param zookeeper.replicaCount Number of Zookeeper instances
+## @param zookeeper.service.ports.client Zookeeper client port
+##
+zookeeper:
+  enabled: true
+  replicaCount: 1
+  service:
+    ports:
+      client: 2181

+ 359 - 0
deepflow/deepflow-ebpf-spring-demo.yaml

@@ -0,0 +1,359 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: deepflow-ebpf-spring-demo
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: demo-config
+  namespace: deepflow-ebpf-spring-demo
+data:
+  REDIS_HOST: redis-master
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: redis
+    release: redis
+  name: redis-master
+  namespace: deepflow-ebpf-spring-demo
+spec:
+  type: ClusterIP
+  selector:
+    app: redis
+    release: redis
+    role: master
+  ports:
+  - name: redis
+    port: 6379
+    protocol: TCP
+    targetPort: redis
+---
+apiVersion: apps/v1 # apps/v1beta2
+kind: StatefulSet
+metadata:
+  labels:
+    app: redis
+    chart: redis-3.0.2
+    release: redis
+  name: redis-master
+  namespace: deepflow-ebpf-spring-demo
+spec:
+  podManagementPolicy: OrderedReady
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app: redis
+      release: redis
+      role: master
+  serviceName: redis-master
+  template:
+    metadata:
+      labels:
+        app: redis
+        release: redis
+        role: master
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - loo-svc
+                  - foo-svc
+                  - bar-svc
+                  - loadgenerator
+                  - redis
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: redis-master
+        env:
+        - name: REDIS_DISABLE_COMMANDS
+          value: FLUSHDB,FLUSHALL
+        - name: REDIS_REPLICATION_MODE
+          value: master
+        - name: ALLOW_EMPTY_PASSWORD
+          value: "yes"
+        image: bitnami/redis:4.0.9
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          exec:
+            command:
+            - redis-cli
+            - ping
+          failureThreshold: 5
+          initialDelaySeconds: 30
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 6379
+          name: redis
+          protocol: TCP
+        readinessProbe:
+          exec:
+            command:
+            - redis-cli
+            - ping
+          failureThreshold: 5
+          initialDelaySeconds: 5
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        volumeMounts:
+        - mountPath: /bitnami/redis/data
+          name: redis-data
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      securityContext:
+        fsGroup: 1001
+        # runAsUser: 1001
+        # https://github.com/bitnami/bitnami-docker-redis/issues/106#issuecomment-388884372
+        runAsUser: 0
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - emptyDir: {}
+        name: redis-data
+  updateStrategy:
+    rollingUpdate:
+      partition: 0
+    type: RollingUpdate
+---
+apiVersion: v1
+kind: Service
+metadata:
+  namespace: deepflow-ebpf-spring-demo
+  name: foo-svc
+  labels:
+    app: foo-svc
+spec:
+  ports:
+    - port: 80
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    app: foo-svc
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: foo-svc
+  namespace: deepflow-ebpf-spring-demo
+spec:
+  selector:
+    matchLabels:
+      app: foo-svc
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: foo-svc
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - loo-svc
+                  - foo-svc
+                  - bar-svc
+                  - loadgenerator
+                  - redis
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: foo-svc
+        image: chanjarster/spring-boot-istio-jaeger-demo-foo-svc:0.1.0
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 8080
+          name: http
+        env:
+          - name: JVM_OPTS
+            value: "-Xms256m -Xmx256m"
+        envFrom:
+        - configMapRef:
+            name: demo-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  namespace: deepflow-ebpf-spring-demo
+  name: bar-svc
+  labels:
+    app: bar-svc
+spec:
+  ports:
+    - port: 8080
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    app: bar-svc
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: bar-svc
+  namespace: deepflow-ebpf-spring-demo
+spec:
+  selector:
+    matchLabels:
+      app: bar-svc
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: bar-svc
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - loo-svc
+                  - foo-svc
+                  - bar-svc
+                  - loadgenerator
+                  - redis
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: bar-svc
+        image: chanjarster/spring-boot-istio-jaeger-demo-bar-svc:0.1.0
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 8080
+          name: http
+        env:
+          - name: JVM_OPTS
+            value: "-Xms256m -Xmx256m"
+        envFrom:
+        - configMapRef:
+            name: demo-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  namespace: deepflow-ebpf-spring-demo
+  name: loo-svc
+  labels:
+    app: loo-svc
+spec:
+  ports:
+    - port: 8080
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    app: loo-svc
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: loo-svc
+  namespace: deepflow-ebpf-spring-demo
+spec:
+  selector:
+    matchLabels:
+      app: loo-svc
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: loo-svc
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - loo-svc
+                  - foo-svc
+                  - bar-svc
+                  - loadgenerator
+                  - redis
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: loo-svc
+        image: chanjarster/spring-boot-istio-jaeger-demo-loo-svc:0.1.0
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 8080
+          name: http
+        env:
+          - name: JVM_OPTS
+            value: "-Xms256m -Xmx256m"
+        envFrom:
+        - configMapRef:
+            name: demo-config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: loadgenerator
+  namespace: deepflow-ebpf-spring-demo
+  labels:
+    app: loadgenerator
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: loadgenerator
+  template:
+    metadata:
+      labels:
+        app: loadgenerator
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - loo-svc
+                  - foo-svc
+                  - bar-svc
+                  - loadgenerator
+                  - redis
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: curl
+        image: docker.io/litmuschaos/curl:latest
+        imagePullPolicy: IfNotPresent
+        command:
+        - /bin/sh
+        - -exc
+        - |
+          while true
+          do
+          curl -i foo-svc
+          sleep 3
+          done

+ 936 - 0
deepflow/deepflow-otel-skywalking-demo.yaml

@@ -0,0 +1,936 @@
+# Copyright (c) 2022 Yunshan Networks
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##################################################################################################
+# This file defines the services, deployments, configmap, statefulsets 
+#
+# To deploy full demo, you can :
+# kubectl apply -f deepflow-otel-skywalking-demo.yaml
+##################################################################################################
+
+##################################################################################################
+# Namespace
+##################################################################################################
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: deepflow-otel-skywalking-demo
+---
+##################################################################################################
+# Deployments
+##################################################################################################
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-item
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-item
+  template:
+    metadata:
+      labels:
+        app: svc-item
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-item
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-item:1.0
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-item
+        - name: SKYWALKING_HOST
+          value: otel-agent.open-telemetry
+        - name: SKYWALKING_PORT
+          value: '11800'
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+          - name: config 
+            mountPath: /home/application.yml
+            subPath: application.yml
+      initContainers:
+        - name: sidecar
+          image: apache/skywalking-java-agent:8.11.0-java8
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /skywalking/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: item-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-order
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-order
+  template:
+    metadata:
+      labels:
+        app: svc-order
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-order
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-order:1.0
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_HOST
+          value: "db-demo"
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-order
+        - name: SKYWALKING_HOST
+          value: otel-agent.open-telemetry
+        - name: SKYWALKING_PORT
+          value: '11800'
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+      initContainers:
+        - name: sidecar
+          image: apache/skywalking-java-agent:8.11.0-java8
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /skywalking/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: order-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-stock
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-stock
+  template:
+    metadata:
+      labels:
+        app: svc-stock
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-stock
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-stock:1.0
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-stock
+        - name: SKYWALKING_HOST
+          value: otel-agent.open-telemetry
+        - name: SKYWALKING_PORT
+          value: '11800'
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+          - name: config
+            mountPath: /home/application.yml
+            subPath: application.yml
+      initContainers:
+        - name: sidecar
+          image: apache/skywalking-java-agent:8.11.0-java8
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /skywalking/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: stock-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-user
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-user
+  template:
+    metadata:
+      labels:
+        app: svc-user
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-user
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-user:1.0
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_HOST
+          value: "db-demo"
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-user
+        - name: SKYWALKING_HOST
+          value: otel-agent.open-telemetry
+        - name: SKYWALKING_PORT
+          value: '11800'
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+          - name: config
+            mountPath: /home/application.yml
+            subPath: application.yml
+      initContainers:
+        - name: sidecar
+          image: apache/skywalking-java-agent:8.11.0-java8
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /skywalking/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: user-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: web-shop
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: web-shop
+  template:
+    metadata:
+      labels:
+        app: web-shop
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: web-shop
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-shopweb:1.0
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 8090
+        env:
+        - name: SW_AGENT_NAME
+          value: spring-svc-webshop
+        - name: SKYWALKING_HOST
+          value: otel-agent.open-telemetry
+        - name: SKYWALKING_PORT
+          value: '11800'
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+      initContainers:
+        - name: sidecar
+          image: apache/skywalking-java-agent:8.11.0-java8
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /skywalking/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: loadgenerator
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: loadgenerator
+  template:
+    metadata:
+      labels:
+        app: loadgenerator
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+        - name: loadgenerator
+          image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_loadgenerator:latest
+          env:
+            - name: FRONTEND_ADDR
+              value: web-shop:8090
+            - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+              value: http://otel-agent.open-telemetry:4317
+            - name: OTEL_RESOURCE_ATTRIBUTES
+              value: service.name=loadgenerator
+            - name: USERS
+              value: '1'
+##################################################################################################
+# StatefulSets
+##################################################################################################
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: db-demo
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  serviceName: db-demo
+  selector:
+    matchLabels:
+      app: db-demo
+  template:
+    metadata:
+      labels:
+        app: db-demo
+        version: 5.7.18
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: db-demo
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-mysql:1.0
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_ROOT_PASSWORD
+          value: "123"
+        ports:
+        - containerPort: 3306
+        readinessProbe:
+          exec:
+            command: ["mysql", "-hlocalhost", "-uroot", "-p123", "-P3306", "--database", "skywalking", "-e", "SELECT 1"]
+          initialDelaySeconds: 15
+          periodSeconds: 5
+          timeoutSeconds: 2
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: nacos
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  replicas: 1
+  serviceName: nacos
+  selector:
+    matchLabels:
+      app: nacos
+  template:
+    metadata:
+      labels:
+        app: nacos
+        version: 1.1.4
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: nacos
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-nacos-server:1.1.4
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_HOST
+          value: "db-demo"
+        - name: MODE
+          value: standalone
+        ports:
+        - containerPort: 8848
+        readinessProbe:
+          httpGet:
+            path: /nacos
+            port: 8848
+          initialDelaySeconds: 3
+          periodSeconds: 5
+          timeoutSeconds: 2
+---
+##################################################################################################
+# ConfigMaps
+##################################################################################################
+apiVersion: v1
+data:
+  application.yml: |
+    spring:
+       main:
+          allow-bean-definition-overriding: true
+       output:
+          ansi:
+             enabled: always
+    application:
+      name: item
+
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20880}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+
+    zipkin:
+       server: ${zipkin.base-url}/api/v2/spans
+       connectTimeout: 5000
+       readTimeout: 5000
+kind: ConfigMap
+metadata:
+  name: item-service
+  namespace: deepflow-otel-skywalking-demo
+---
+apiVersion: v1
+data:
+  application.yml: |
+    spring:
+      main:
+          allow-bean-definition-overriding: true
+      output:
+          ansi:
+            enabled: always
+      datasource:
+          driver-class-name: ${mysql.driver-class}
+          url: jdbc:mysql://${mysql.host}:${mysql.port}/${db.order}?connectTimeout=3000&socketTimeout=10000&characterEncoding=utf8&useTimezone=true&serverTimezone=Asia/Shanghai&useSSL=false${jdbc.interceptors}
+          username: ${mysql.user}  
+          password: ${mysql.password}
+
+    application:
+      name: order 
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20883}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+        consumer:
+            filter: ${dubbo.filter}
+            # Fault tolerance. Options: failover/failfast/failsafe/failback/forking
+            cluster: failfast
+            # Load balancing strategy. Options: random, roundrobin(polling), leastactive(invoking least active service)
+            loadbalance: roundrobin
+
+    druid:
+      defaultAutoCommit: true
+      defaultTransactionIsolation: 2
+      initialSize: 1
+      maxActive: 3
+      maxWait: 10000
+      minIdle: 1
+      validationQuery: /* ping */ select 1
+      testOnBorrow: false
+      testOnReturn: false
+      testWhileIdle: true
+      timeBetweenEvictionRunsMillis: 60000
+      minEvictableIdleTimeMillis: 1800000
+      removeAbandoned: true
+      removeAbandonedTimeout: 1800
+      logAbandoned: true
+
+    mybatis: # see MybatisProperties.class in mybatis-spring-boot-autoconfigure
+      mapperLocations: classpath:mappers/*.xml
+
+    seata:
+      enabled: true
+      application-id: ${application.name}
+      tx-service-group: my_demo_gtx
+      config:
+        type: nacos
+        nacos:
+          namespace:
+          serverAddr: ${nacos.address}
+      registry:
+        type: nacos
+        nacos:
+          cluster: default
+          server-addr: ${nacos.address}
+          namespace:
+
+    zipkin:
+      server: ${zipkin.base-url}/api/v2/spans
+      connectTimeout: 5000
+      readTimeout: 5000
+kind: ConfigMap
+metadata:
+  name: order-service
+  namespace: deepflow-otel-skywalking-demo
+---
+apiVersion: v1
+data:
+  application.yml: |
+    spring:
+        main:
+            allow-bean-definition-overriding: true
+        output:
+            ansi:
+                enabled: always
+
+    application:
+      name: stock
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20881}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+
+    zipkin:
+       server: ${zipkin.base-url}/api/v2/spans
+       connectTimeout: 5000
+       readTimeout: 5000
+kind: ConfigMap
+metadata:
+  name: stock-service
+  namespace: deepflow-otel-skywalking-demo
+---
+apiVersion: v1
+data:
+  application.yml: |
+    mysql:
+      driver-class: com.mysql.jdbc.Driver
+      host: ${MYSQL_HOST:localhost}
+      port: ${MYSQL_PORT:3306}
+      user: mydemo
+      password: mydemo
+    db:
+      order: mydemo-dn1
+      user: mydemo-dn1
+    jdbc:
+      interceptors:
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    spring:
+      main:
+          allow-bean-definition-overriding: true
+      output:
+          ansi:
+            enabled: always
+      datasource:  
+          driver-class-name: ${mysql.driver-class}
+          url: jdbc:mysql://${mysql.host}:${mysql.port}/${db.user}?connectTimeout=3000&socketTimeout=10000&characterEncoding=utf8&useTimezone=true&serverTimezone=Asia/Shanghai&useSSL=false${jdbc.interceptors}
+          username: ${mysql.user}  
+          password: ${mysql.password}
+
+    application:
+      name: user
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20882}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+
+    druid:
+      defaultAutoCommit: true
+      defaultTransactionIsolation: 2
+      initialSize: 1
+      maxActive: 3
+      maxWait: 5000
+      minIdle: 1
+      validationQuery: /* ping */ select 1
+      testOnBorrow: false
+      testOnReturn: false
+      testWhileIdle: true
+      timeBetweenEvictionRunsMillis: 60000
+      minEvictableIdleTimeMillis: 1800000 
+      removeAbandoned: true
+      removeAbandonedTimeout: 1800
+      logAbandoned: true
+
+    mybatis: # see MybatisProperties.class in mybatis-spring-boot-autoconfigure
+      mapperLocations: classpath:mappers/*.xml
+
+    seata:
+      enabled: true
+      application-id: ${application.name}
+      tx-service-group: my_demo_gtx
+      config:
+        type: nacos
+        nacos:
+          namespace:
+          serverAddr: ${nacos.address}
+      registry:
+        type: nacos
+        nacos:
+          cluster: default
+          server-addr: ${nacos.address}
+          namespace:
+
+    zipkin:
+      server: ${zipkin.base-url}/api/v2/spans
+      connectTimeout: 5000
+      readTimeout: 5000
+
+    mydemo:
+      hostname: ${HOSTNAME:localhost}
+kind: ConfigMap
+metadata:
+  name: user-service
+  namespace: deepflow-otel-skywalking-demo
+---
+##################################################################################################
+# Services
+##################################################################################################
+kind: Service
+apiVersion: v1
+metadata:
+  name: nacos
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  type: ClusterIP
+  ports:
+  - name: http-nacos
+    port: 8848
+    targetPort: 8848
+  selector:
+    app: nacos
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: web-shop
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  type: ClusterIP
+  ports:
+    - name: http-shop
+      port: 8090
+      targetPort: 8090
+  selector:
+    app: web-shop
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: db-demo
+  namespace: deepflow-otel-skywalking-demo
+spec:
+  type: ClusterIP
+  ports:
+  - name: mysql
+    port: 3306
+    targetPort: 3306
+  selector:
+    app: db-demo

+ 966 - 0
deepflow/deepflow-otel-spring-demo.yaml

@@ -0,0 +1,966 @@
+# Copyright (c) 2022 Yunshan Networks
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##################################################################################################
+# This file defines the services, deployments, configmap, statefulsets 
+#
+# To deploy full demo, you can :
+# kubectl apply -f deepflow-otel-spring-demo.yaml -n ${YOUR-NAMESPACE}
+##################################################################################################
+
+##################################################################################################
+# Namespace
+##################################################################################################
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: deepflow-otel-spring-demo
+---
+##################################################################################################
+# Deployments
+##################################################################################################
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-item
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-item
+  template:
+    metadata:
+      labels:
+        app: svc-item
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-item
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-item:1.0
+        command:
+          - sh
+        args:
+          - /home/docker-entrypoint.sh
+          - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
+          - '-Dotel.resource.attributes=service.name=item-svc'
+          - '-Dotel.traces.exporter=otlp'
+          - '-jar'
+          - /home/item-service-0.0.1-SNAPSHOT.jar
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-item
+        - name: OTEL_EXPORTER_OTLP_ENDPOINT
+          value: http://otel-collector-opentelemetry-collector.open-telemetry:4317
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+          - name: config
+            mountPath: /home/application.yml
+            subPath: application.yml
+      initContainers:
+        - name: sidecar
+          image: registry.cn-beijing.aliyuncs.com/deepflow-demo/opentelemetry-javaagent:1.15.0
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /opentelemetry/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: item-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-order
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-order
+  template:
+    metadata:
+      labels:
+        app: svc-order
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-order
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-order:1.0
+        command:
+          - sh
+        args:
+          - /home/docker-entrypoint.sh
+          - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
+          - '-Dotel.resource.attributes=service.name=order-svc'
+          - '-Dotel.traces.exporter=otlp'
+          - '-jar'
+          - /home/order-service-0.0.1-SNAPSHOT.jar
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_HOST
+          value: "db-demo"
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-order
+        - name: OTEL_EXPORTER_OTLP_ENDPOINT
+          value: http://otel-collector-opentelemetry-collector.open-telemetry:4317
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+      initContainers:
+        - name: sidecar
+          image: registry.cn-beijing.aliyuncs.com/deepflow-demo/opentelemetry-javaagent:1.15.0
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /opentelemetry/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: order-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-stock
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-stock
+  template:
+    metadata:
+      labels:
+        app: svc-stock
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-stock
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-stock:1.0
+        command:
+          - sh
+        args:
+          - /home/docker-entrypoint.sh
+          - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
+          - '-Dotel.resource.attributes=service.name=stock-svc'
+          - '-Dotel.traces.exporter=otlp'
+          - '-jar'
+          - /home/stock-service-0.0.1-SNAPSHOT.jar
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-stock
+        - name: OTEL_EXPORTER_OTLP_ENDPOINT
+          value: http://otel-collector-opentelemetry-collector.open-telemetry:4317
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+          - name: config
+            mountPath: /home/application.yml
+            subPath: application.yml
+      initContainers:
+        - name: sidecar
+          image: registry.cn-beijing.aliyuncs.com/deepflow-demo/opentelemetry-javaagent:1.15.0
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /opentelemetry/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: stock-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: svc-user
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: svc-user
+  template:
+    metadata:
+      labels:
+        app: svc-user
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: svc-user
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-user:1.0
+        command:
+          - sh
+        args:
+          - /home/docker-entrypoint.sh
+          - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
+          - '-Dotel.resource.attributes=service.name=user-svc'
+          - '-Dotel.traces.exporter=otlp'
+          - '-jar'
+          - /home/user-service-0.0.1-SNAPSHOT.jar
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_HOST
+          value: "db-demo"
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: SERVICE_PORT
+          value: "20880"
+        - name: SW_AGENT_NAME
+          value: spring-svc-user
+        - name: OTEL_EXPORTER_OTLP_ENDPOINT
+          value: http://otel-collector-opentelemetry-collector.open-telemetry:4317
+        ports:
+        - containerPort: 20880
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+          - name: config
+            mountPath: /home/application.yml
+            subPath: application.yml
+      initContainers:
+        - name: sidecar
+          image: registry.cn-beijing.aliyuncs.com/deepflow-demo/opentelemetry-javaagent:1.15.0
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /opentelemetry/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+        - configMap:
+            name: user-service
+          name: config
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: web-shop
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: web-shop
+  template:
+    metadata:
+      labels:
+        app: web-shop
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: web-shop
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-shopweb:1.0
+        command:
+          - sh
+        args:
+          - /home/docker-entrypoint.sh
+          - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
+          - '-Dotel.resource.attributes=service.name=shop-web'
+          - '-Dotel.traces.exporter=otlp'
+          - '-jar'
+          - /home/shop-web-0.0.1-SNAPSHOT.jar
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 8090
+        env:
+        - name: SW_AGENT_NAME
+          value: spring-svc-webshop
+        - name: OTEL_EXPORTER_OTLP_ENDPOINT
+          value: http://otel-collector-opentelemetry-collector.open-telemetry:4317
+        volumeMounts:
+          - name: sidecar
+            mountPath: /sidecar
+      initContainers:
+        - name: sidecar
+          image: registry.cn-beijing.aliyuncs.com/deepflow-demo/opentelemetry-javaagent:1.15.0
+          command:
+            - /bin/sh
+          args:
+            - '-c'
+            - cp -R /opentelemetry/agent /sidecar/
+          volumeMounts:
+            - name: sidecar
+              mountPath: /sidecar
+      volumes:
+        - name: sidecar
+          emptyDir: {}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: loadgenerator
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: loadgenerator
+  template:
+    metadata:
+      labels:
+        app: loadgenerator
+        version: latest
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+        - name: loadgenerator
+          image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_loadgenerator:latest
+          env:
+            - name: FRONTEND_ADDR
+              value: web-shop:8090
+            - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+              value: http://otel-collector-opentelemetry-collector.open-telemetry:4317
+            - name: OTEL_RESOURCE_ATTRIBUTES
+              value: service.name=loadgenerator
+            - name: USERS
+              value: '1'
+##################################################################################################
+# StatefulSets
+##################################################################################################
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: db-demo
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  serviceName: db-demo
+  selector:
+    matchLabels:
+      app: db-demo
+  template:
+    metadata:
+      labels:
+        app: db-demo
+        version: 5.7.18
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: db-demo
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-mysql:1.0
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_ROOT_PASSWORD
+          value: "123"
+        ports:
+        - containerPort: 3306
+        readinessProbe:
+          exec:
+            command: ["mysql", "-hlocalhost", "-uroot", "-p123", "-P3306", "--database", "skywalking", "-e", "SELECT 1"]
+          initialDelaySeconds: 15
+          periodSeconds: 5
+          timeoutSeconds: 2
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: nacos
+  namespace: deepflow-otel-spring-demo
+spec:
+  replicas: 1
+  serviceName: nacos
+  selector:
+    matchLabels:
+      app: nacos
+  template:
+    metadata:
+      labels:
+        app: nacos
+        version: 1.1.4
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - svc-item
+                  - svc-order
+                  - svc-stock
+                  - svc-user
+                  - web-shop
+                  - loadgenerator
+                  - db-demo
+                  - nacos
+              topologyKey: "kubernetes.io/hostname"
+      containers:
+      - name: nacos
+        image: registry.cn-beijing.aliyuncs.com/deepflow-demo/skywalking-demo_mydemo-nacos-server:1.1.4
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: MYSQL_HOST
+          value: "db-demo"
+        - name: MODE
+          value: standalone
+        ports:
+        - containerPort: 8848
+        readinessProbe:
+          httpGet:
+            path: /nacos
+            port: 8848
+          initialDelaySeconds: 3
+          periodSeconds: 5
+          timeoutSeconds: 2
+---
+##################################################################################################
+# ConfigMaps
+##################################################################################################
+apiVersion: v1
+data:
+  application.yml: |
+    spring:
+       main:
+          allow-bean-definition-overriding: true
+       output:
+          ansi:
+             enabled: always
+    application:
+      name: item
+
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20880}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+
+    zipkin:
+       server: ${zipkin.base-url}/api/v2/spans
+       connectTimeout: 5000
+       readTimeout: 5000
+kind: ConfigMap
+metadata:
+  name: item-service
+  namespace: deepflow-otel-spring-demo
+---
+apiVersion: v1
+data:
+  application.yml: |
+    spring:
+      main:
+          allow-bean-definition-overriding: true
+      output:
+          ansi:
+            enabled: always
+      datasource:
+          driver-class-name: ${mysql.driver-class}
+          url: jdbc:mysql://${mysql.host}:${mysql.port}/${db.order}?connectTimeout=3000&socketTimeout=10000&characterEncoding=utf8&useTimezone=true&serverTimezone=Asia/Shanghai&useSSL=false${jdbc.interceptors}
+          username: ${mysql.user}  
+          password: ${mysql.password}
+
+    application:
+      name: order 
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20883}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+        consumer:
+            filter: ${dubbo.filter}
+            # Fault tolerance. Options: failover/failfast/failsafe/failback/forking
+            cluster: failfast
+            # Load balancing strategy. Options: random, roundrobin(polling), leastactive(invoking least active service)
+            loadbalance: roundrobin
+
+    druid:
+      defaultAutoCommit: true
+      defaultTransactionIsolation: 2
+      initialSize: 1
+      maxActive: 3
+      maxWait: 10000
+      minIdle: 1
+      validationQuery: /* ping */ select 1
+      testOnBorrow: false
+      testOnReturn: false
+      testWhileIdle: true
+      timeBetweenEvictionRunsMillis: 60000
+      minEvictableIdleTimeMillis: 1800000
+      removeAbandoned: true
+      removeAbandonedTimeout: 1800
+      logAbandoned: true
+
+    mybatis: # see MybatisProperties.class in mybatis-spring-boot-autoconfigure
+      mapperLocations: classpath:mappers/*.xml
+
+    seata:
+      enabled: true
+      application-id: ${application.name}
+      tx-service-group: my_demo_gtx
+      config:
+        type: nacos
+        nacos:
+          namespace:
+          serverAddr: ${nacos.address}
+      registry:
+        type: nacos
+        nacos:
+          cluster: default
+          server-addr: ${nacos.address}
+          namespace:
+
+    zipkin:
+      server: ${zipkin.base-url}/api/v2/spans
+      connectTimeout: 5000
+      readTimeout: 5000
+kind: ConfigMap
+metadata:
+  name: order-service
+  namespace: deepflow-otel-spring-demo
+---
+apiVersion: v1
+data:
+  application.yml: |
+    spring:
+        main:
+            allow-bean-definition-overriding: true
+        output:
+            ansi:
+                enabled: always
+
+    application:
+      name: stock
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20881}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+
+    zipkin:
+       server: ${zipkin.base-url}/api/v2/spans
+       connectTimeout: 5000
+       readTimeout: 5000
+kind: ConfigMap
+metadata:
+  name: stock-service
+  namespace: deepflow-otel-spring-demo
+---
+apiVersion: v1
+data:
+  application.yml: |
+    mysql:
+      driver-class: com.mysql.jdbc.Driver
+      host: ${MYSQL_HOST:localhost}
+      port: ${MYSQL_PORT:3306}
+      user: mydemo
+      password: mydemo
+    db:
+      order: mydemo-dn1
+      user: mydemo-dn1
+    jdbc:
+      interceptors:
+    nacos:
+      address: ${NACOS_HOST:localhost}:8848
+
+    spring:
+      main:
+          allow-bean-definition-overriding: true
+      output:
+          ansi:
+            enabled: always
+      datasource:  
+          driver-class-name: ${mysql.driver-class}
+          url: jdbc:mysql://${mysql.host}:${mysql.port}/${db.user}?connectTimeout=3000&socketTimeout=10000&characterEncoding=utf8&useTimezone=true&serverTimezone=Asia/Shanghai&useSSL=false${jdbc.interceptors}
+          username: ${mysql.user}  
+          password: ${mysql.password}
+
+    application:
+      name: user
+
+    dubbo:
+        filter:
+        application: # see com.alibaba.dubbo.config.ApplicationConfig
+            id: ${application.name}-srv
+            name: ${application.name}-srv
+            qosEnable: false
+        protocol: # see com.alibaba.dubbo.config.ProtocolConfig
+            id: rest
+            name: rest
+            host: ${SERVICE_HOST:localhost} # Use POD IP to register Dubbo service
+            port: ${SERVICE_PORT:20882}
+            threads: 3
+            iothreads: 1
+            server: netty
+            client: netty
+            status: server
+            serialization: fst
+            queues: 0
+            keepAlive: true
+        registry: # see com.alibaba.dubbo.config.RegistryConfig
+            address: nacos://${nacos.address}
+            check: false
+        provider:
+            cluster: failfast
+            retries: 0
+            loadbalance: roundrobin
+            timeout: 10000
+            filter: ${dubbo.filter}
+
+    druid:
+      defaultAutoCommit: true
+      defaultTransactionIsolation: 2
+      initialSize: 1
+      maxActive: 3
+      maxWait: 5000
+      minIdle: 1
+      validationQuery: /* ping */ select 1
+      testOnBorrow: false
+      testOnReturn: false
+      testWhileIdle: true
+      timeBetweenEvictionRunsMillis: 60000
+      minEvictableIdleTimeMillis: 1800000
+      removeAbandoned: true
+      removeAbandonedTimeout: 1800
+      logAbandoned: true
+
+    mybatis: # see MybatisProperties.class in mybatis-spring-boot-autoconfigure
+      mapperLocations: classpath:mappers/*.xml
+
+    seata:
+      enabled: true
+      application-id: ${application.name}
+      tx-service-group: my_demo_gtx
+      config:
+        type: nacos
+        nacos:
+          namespace:
+          serverAddr: ${nacos.address}
+      registry:
+        type: nacos
+        nacos:
+          cluster: default
+          server-addr: ${nacos.address}
+          namespace:
+
+    zipkin:
+      server: ${zipkin.base-url}/api/v2/spans
+      connectTimeout: 5000
+      readTimeout: 5000
+
+    mydemo:
+      hostname: ${HOSTNAME:localhost}
+kind: ConfigMap
+metadata:
+  name: user-service
+  namespace: deepflow-otel-spring-demo
+---
+##################################################################################################
+# Services
+##################################################################################################
+kind: Service
+apiVersion: v1
+metadata:
+  name: nacos
+  namespace: deepflow-otel-spring-demo
+spec:
+  type: ClusterIP
+  ports:
+  - name: http-nacos
+    port: 8848
+    targetPort: 8848
+  selector:
+    app: nacos
+---
+# ClusterIP service exposing the web-shop frontend (HTTP, port 8090).
+# ports: uses flush-style sequence indentation to match the sibling
+# nacos and db-demo Services in this file.
+kind: Service
+apiVersion: v1
+metadata:
+  name: web-shop
+  namespace: deepflow-otel-spring-demo
+spec:
+  type: ClusterIP
+  ports:
+  - name: http-shop
+    port: 8090
+    targetPort: 8090
+  selector:
+    app: web-shop
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: db-demo
+  namespace: deepflow-otel-spring-demo
+spec:
+  type: ClusterIP
+  ports:
+  - name: mysql
+    port: 3306
+    targetPort: 3306
+  selector:
+    app: db-demo

+ 23 - 0
deepflow/deepflow/.helmignore

@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 15 - 0
deepflow/deepflow/Chart.lock

@@ -0,0 +1,15 @@
+dependencies:
+- name: clickhouse
+  repository: ""
+  version: '*.*.*'
+- name: mysql
+  repository: ""
+  version: '*.*.*'
+- name: deepflow-agent
+  repository: ""
+  version: '*.*.*'
+- name: grafana
+  repository: https://grafana.github.io/helm-charts
+  version: 6.52.1
+digest: sha256:b501c00be925bcef16afa80b5bd248bcbfcbc4415143d900309e31c03fb450d2
+generated: "2023-03-08T10:06:16.75352841Z"

+ 33 - 0
deepflow/deepflow/Chart.yaml

@@ -0,0 +1,33 @@
+apiVersion: v2
+appVersion: 6.2.4
+dependencies:
+- condition: clickhouse.enabled
+  name: clickhouse
+  repository: ""
+  version: '*.*.*'
+- condition: mysql.enabled
+  name: mysql
+  repository: ""
+  version: '*.*.*'
+- condition: deepflow-agent.enabled
+  name: deepflow-agent
+  repository: ""
+  version: '*.*.*'
+- condition: grafana.enabled
+  name: grafana
+  repository: https://grafana.github.io/helm-charts
+  version: 6.*.*
+description: An automated observability platform for cloud-native developers.
+home: https://github.com/deepflowys/deepflow
+icon: https://raw.githubusercontent.com/deepflowys/deepflow-charts/main/deepflow.svg
+keywords:
+- deepflow
+kubeVersion: '>=1.16.0-0'
+maintainers:
+- name: deepflow
+  url: https://github.com/deepflowys/deepflow
+name: deepflow
+sources:
+- https://github.com/deepflowys/deepflow
+type: application
+version: 6.2.401

+ 181 - 0
deepflow/deepflow/README.md

@@ -0,0 +1,181 @@
+# DeepFlow Helm Charts
+
+
+This repository contains [Helm](https://helm.sh/) charts for DeepFlow project.
+
+## Usage
+
+### Prerequisites
+
+- Kubernetes 1.16+
+- Helm 3+
+
+[Helm](https://helm.sh) must be installed to use the charts.
+Please refer to Helm's [documentation](https://helm.sh/docs/) to get started.
+
+Once Helm is set up properly, add the repo as follows:
+
+```console
+helm repo add deepflow https://deepflowys.github.io/deepflow
+helm repo update deepflow
+```
+
+## Helm Charts
+
+You can then run `helm search repo deepflow` to see the charts.
+
+_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+
+## Installing the Chart
+
+To install the chart with the release name `deepflow`:
+
+```console
+helm install deepflow -n deepflow deepflow/deepflow --create-namespace
+```
+
+## Uninstalling the Chart
+
+To uninstall/delete the my-release deployment:
+
+```console
+helm delete deepflow -n deepflow
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Dependencies
+
+By default this chart installs additional, dependent charts:
+
+- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana)
+
+
+
+## Main values block usage:
+
+### Global
+
+```yaml
+  password: 
+    mysql: deepflow ## mysql root account password
+    grafana: deepflow ## grafana admin account password
+  replicas: 1 ## Number of replicas for deepflow-server and clickhouse
+  nodePort: ## NodePort that requires a fixed port
+    clickhouse: 30900
+    deepflowServerIngester: 30033
+    deepflowServerGrpc: 30035
+    deepflowServerSslGrpc: 30135
+    deepflowServerhealthCheck: 30417
+  ntpServer: ntp.aliyun.com ## ntp server address, you need to ensure that udp 123 port is available
+  allInOneLocalStorage: false   ## Whether to enable all-in-one local storage. If enabled, the local /opt directory is used to store data by default, the node affinity check is ignored, and no data persistence is guaranteed
+```
+
+
+### Affinity:
+
+The affinity settings of the component. They are combined with `global.affinity` using a logical OR.
+
+- podAntiAffinityLabelSelector: affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
+
+  ```yaml
+  podAntiAffinityLabelSelector: 
+      - labelSelector:
+        - key: app #your label key
+          operator: In # In, NotIn, Exists, DoesNotExist
+          values: deepflow #your label value, Multiple values separated by commas
+        - key: component 
+          operator: In
+          values: deepflow-server,deepflowys
+        topologyKey: "kubernetes.io/hostname"
+  ```
+
+- podAntiAffinityTermLabelSelector: affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution
+
+  ```yaml
+  podAntiAffinityTermLabelSelector:
+      - labelSelector:
+        - key: app # your label key
+          operator: In # In, NotIn, Exists, DoesNotExist
+          values: deepflow # your label value, Multiple values separated by commas
+        - key: component 
+          operator: In
+          values: deepflow-server,deepflowys
+        topologyKey: "kubernetes.io/hostname"
+  ```
+
+- podAffinityLabelSelector: affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution
+
+  ```yaml
+    podAffinityLabelSelector:
+      - labelSelector:
+        - key: app
+          operator: In
+          values: deepflow
+        - key: component
+          operator: In
+          values: clickhouse
+        topologyKey: "kubernetes.io/hostname"
+  ```
+
+- podAffinityTermLabelSelector: affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution
+
+  ```yaml
+    podAffinityTermLabelSelector:
+      - topologyKey: kubernetes.io/hostname
+        weight: 10
+        labelSelector:
+          - key: app
+            operator: In
+            values: deepflow,deepflowys
+  ```
+
+- nodeAffinityLabelSelector: affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution
+
+  ```yaml
+    nodeAffinityLabelSelector:
+      - matchExpressions:
+          - key: app
+            operator: In
+            values: deepflow,deepflowys
+  ```
+
+- nodeAffinityTermLabelSelector: affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution
+
+  ```yaml
+    nodeAffinityTermLabelSelector:
+      - weight: 10
+        matchExpressions:
+        - key: app
+          operator: In
+          values: deepflow,deepflowys
+  ```
+
+### Storage config
+
+```yaml
+  storageConfig:
+    type: persistentVolumeClaim  ## persistentVolumeClaim or hostPath. If you use hostPath, you must configure nodeAffinityLabelSelector; otherwise your data will be lost when the Pod drifts
+    generateType: "{{ if $.Values.global.allInOneLocalStorage }}hostPath{{ else }}{{$.Values.storageConfig.type}}{{end}}" #Please ignore this
+    hostPath: /opt/deepflow-clickhouse ## your hostPath path
+    persistence: ## volumeClaimTemplates configuration
+      - name: clickhouse-path
+        accessModes:
+        - ReadWriteOnce
+        size: 100Gi
+        annotations: 
+        storageClass: "-"
+        # selector:
+        #   matchLabels:
+        #     app.kubernetes.io/name: clickhouse
+      - name: clickhouse-storage-path
+        accessModes:
+        - ReadWriteOnce
+        size: 200Gi
+        annotations: 
+        storageClass: "-"
+        # selector:
+        #   matchLabels:
+        #     app.kubernetes.io/name: clickhouse
+    s3StorageEnabled: false
+```

+ 23 - 0
deepflow/deepflow/charts/clickhouse/.helmignore

@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 6 - 0
deepflow/deepflow/charts/clickhouse/Chart.yaml

@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: 21.8.15.7
+description: A Helm chart for Kubernetes
+name: clickhouse
+type: application
+version: 0.1.000

+ 255 - 0
deepflow/deepflow/charts/clickhouse/templates/_affinity.tpl

@@ -0,0 +1,255 @@
+{{/* affinity - https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ */}}
+
+{{- define "nodeaffinity" }}
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+    {{- include "nodeAffinityRequiredDuringScheduling" . }}
+    preferredDuringSchedulingIgnoredDuringExecution:
+    {{- include "nodeAffinityPreferredDuringScheduling" . }}
+{{- end }}
+
+{{- define "nodeAffinityRequiredDuringScheduling" }}
+    {{- if or .Values.nodeAffinityLabelSelector .Values.global.nodeAffinityLabelSelector }}
+      nodeSelectorTerms:
+      {{- range $matchExpressionsIndex, $matchExpressionsItem := .Values.nodeAffinityLabelSelector }}
+        - matchExpressions:
+        {{- range $Index, $item := $matchExpressionsItem.matchExpressions }}
+          - key: {{ $item.key }}
+            operator: {{ $item.operator }}
+            {{- if $item.values }}
+            values:
+            {{- $vals := split "," $item.values }}
+            {{- range $i, $v := $vals }}
+            - {{ $v | quote }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+      {{- end }}
+      {{- range $matchExpressionsIndex, $matchExpressionsItem := .Values.global.nodeAffinityLabelSelector }}
+        - matchExpressions:
+        {{- range $Index, $item := $matchExpressionsItem.matchExpressions }}
+          - key: {{ $item.key }}
+            operator: {{ $item.operator }}
+            {{- if $item.values }}
+            values:
+            {{- $vals := split "," $item.values }}
+            {{- range $i, $v := $vals }}
+            - {{ $v | quote }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+      {{- end }}
+    {{- end }}
+{{- end }}
+
+{{- define "nodeAffinityPreferredDuringScheduling" }}
+    {{- range $weightIndex, $weightItem := .Values.nodeAffinityTermLabelSelector }}
+    - weight: {{ $weightItem.weight }}
+      preference:
+        matchExpressions:
+      {{- range $Index, $item := $weightItem.matchExpressions }}
+        - key: {{ $item.key }}
+          operator: {{ $item.operator }}
+          {{- if $item.values }}
+          values:
+          {{- $vals := split "," $item.values }}
+          {{- range $i, $v := $vals }}
+          - {{ $v | quote }}
+          {{- end }}
+          {{- end }}
+      {{- end }}
+    {{- end }}
+    {{- range $weightIndex, $weightItem := .Values.global.nodeAffinityTermLabelSelector }}
+    - weight: {{ $weightItem.weight }}
+      preference:
+        matchExpressions:
+      {{- range $Index, $item := $weightItem.matchExpressions }}
+        - key: {{ $item.key }}
+          operator: {{ $item.operator }}
+          {{- if $item.values }}
+          values:
+          {{- $vals := split "," $item.values }}
+          {{- range $i, $v := $vals }}
+          - {{ $v | quote }}
+          {{- end }}
+          {{- end }}
+      {{- end }}
+    {{- end }}
+{{- end }}
+
+
+{{- define "podAffinity" }}
+{{- if or .Values.podAffinityLabelSelector .Values.podAffinityTermLabelSelector}}
+  podAffinity:
+    {{- if .Values.podAffinityLabelSelector }}
+    requiredDuringSchedulingIgnoredDuringExecution:
+    {{- include "podAffinityRequiredDuringScheduling" . }}
+    {{- end }}
+    {{- if or .Values.podAffinityTermLabelSelector}}
+    preferredDuringSchedulingIgnoredDuringExecution:
+    {{- include "podAffinityPreferredDuringScheduling" . }}
+    {{- end }}
+{{- end }}
+{{- end }}
+
+{{- define "podAffinityRequiredDuringScheduling" }}
+    {{- range $labelSelector, $labelSelectorItem := .Values.podAffinityLabelSelector }}
+    - labelSelector:
+        matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+        - key: {{ $item.key }}
+          operator: {{ $item.operator }}
+          {{- if $item.values }}
+          values:
+          {{- $vals := split "," $item.values }}
+          {{- range $i, $v := $vals }}
+          - {{ $v | quote }}
+          {{- end }}
+          {{- end }}
+        {{- end }}
+      topologyKey: {{ $labelSelectorItem.topologyKey }}
+    {{- end }}
+    {{- range $labelSelector, $labelSelectorItem := .Values.global.podAffinityLabelSelector }}
+    - labelSelector:
+        matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+        - key: {{ $item.key }}
+          operator: {{ $item.operator }}
+          {{- if $item.values }}
+          values:
+          {{- $vals := split "," $item.values }}
+          {{- range $i, $v := $vals }}
+          - {{ $v | quote }}
+          {{- end }}
+          {{- end }}
+        {{- end }}
+      topologyKey: {{ $labelSelectorItem.topologyKey }}
+    {{- end }}
+{{- end }}
+
+{{- define "podAffinityPreferredDuringScheduling" }}
+    {{- range $labelSelector, $labelSelectorItem := .Values.podAffinityTermLabelSelector }}
+    - podAffinityTerm:
+        labelSelector:
+          matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+          - key: {{ $item.key }}
+            operator: {{ $item.operator }}
+            {{- if $item.values }}
+            values:
+            {{- $vals := split "," $item.values }}
+            {{- range $i, $v := $vals }}
+            - {{ $v | quote }}
+            {{- end }}
+            {{- end }}
+        {{- end }}
+        topologyKey: {{ $labelSelectorItem.topologyKey }}
+      weight:  {{ $labelSelectorItem.weight }}
+    {{- end }}
+    {{- range $labelSelector, $labelSelectorItem := .Values.global.podAffinityTermLabelSelector }}
+    - podAffinityTerm:
+        labelSelector:
+          matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+          - key: {{ $item.key }}
+            operator: {{ $item.operator }}
+            {{- if $item.values }}
+            values:
+            {{- $vals := split "," $item.values }}
+            {{- range $i, $v := $vals }}
+            - {{ $v | quote }}
+            {{- end }}
+            {{- end }}
+        {{- end }}
+        topologyKey: {{ $labelSelectorItem.topologyKey }}
+      weight:  {{ $labelSelectorItem.weight }}
+    {{- end }}
+{{- end }}
+
+{{- define "podAntiAffinity" }}
+{{- if or .Values.podAntiAffinityLabelSelector .Values.podAntiAffinityTermLabelSelector}}
+  podAntiAffinity:
+    {{- if .Values.podAntiAffinityLabelSelector }}
+    requiredDuringSchedulingIgnoredDuringExecution:
+    {{- include "podAntiAffinityRequiredDuringScheduling" . }}
+    {{- end }}
+    {{- if or .Values.podAntiAffinityTermLabelSelector}}
+    preferredDuringSchedulingIgnoredDuringExecution:
+    {{- include "podAntiAffinityPreferredDuringScheduling" . }}
+    {{- end }}
+{{- end }}
+{{- end }}
+
+{{- define "podAntiAffinityRequiredDuringScheduling" }}
+    {{- range $labelSelectorIndex, $labelSelectorItem := .Values.podAntiAffinityLabelSelector }}
+    - labelSelector:
+        matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+        - key: {{ $item.key }}
+          operator: {{ $item.operator }}
+          {{- if $item.values }}
+          values:
+          {{- $vals := split "," $item.values }}
+          {{- range $i, $v := $vals }}
+          - {{ $v | quote }}
+          {{- end }}
+          {{- end }}
+        {{- end }}
+      topologyKey: {{ $labelSelectorItem.topologyKey }}
+    {{- end }}
+    {{- range $labelSelectorIndex, $labelSelectorItem := .Values.global.podAntiAffinityLabelSelector }}
+    - labelSelector:
+        matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+        - key: {{ $item.key }}
+          operator: {{ $item.operator }}
+          {{- if $item.values }}
+          values:
+          {{- $vals := split "," $item.values }}
+          {{- range $i, $v := $vals }}
+          - {{ $v | quote }}
+          {{- end }}
+          {{- end }}
+        {{- end }}
+      topologyKey: {{ $labelSelectorItem.topologyKey }}
+    {{- end }}
+{{- end }}
+
+{{- define "podAntiAffinityPreferredDuringScheduling" }}
+    {{- range $labelSelectorIndex, $labelSelectorItem := .Values.podAntiAffinityTermLabelSelector }}
+    - podAffinityTerm:
+        labelSelector:
+          matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+          - key: {{ $item.key }}
+            operator: {{ $item.operator }}
+            {{- if $item.values }}
+            values:
+            {{- $vals := split "," $item.values }}
+            {{- range $i, $v := $vals }}
+            - {{ $v | quote }}
+            {{- end }}
+            {{- end }}
+        {{- end }}
+        topologyKey: {{ $labelSelectorItem.topologyKey }}
+      weight: {{ $labelSelectorItem.weight }}
+    {{- end }}
+    {{- range $labelSelectorIndex, $labelSelectorItem := .Values.global.podAntiAffinityTermLabelSelector }}
+    - podAffinityTerm:
+        labelSelector:
+          matchExpressions:
+      {{- range $index, $item := $labelSelectorItem.labelSelector }}
+          - key: {{ $item.key }}
+            operator: {{ $item.operator }}
+            {{- if $item.values }}
+            values:
+            {{- $vals := split "," $item.values }}
+            {{- range $i, $v := $vals }}
+            - {{ $v | quote }}
+            {{- end }}
+            {{- end }}
+        {{- end }}
+        topologyKey: {{ $labelSelectorItem.topologyKey }}
+      weight: {{ $labelSelectorItem.weight }}
+    {{- end }}
+{{- end }}

Some files were not shown because too many files changed in this diff