yawyd 1 year ago
Parent commit 8c09984f9b
100 files changed with 11366 additions and 12 deletions
  1.   +20 -0    clickhouse/clickhouse_etc/conf.d/00_default_overrides.xml
  2.   +787 -0   clickhouse/clickhouse_etc/config.xml
  3.   +102 -0   clickhouse/clickhouse_etc/users.xml
  4.   +18 -0    clickhouse/templates/configmap-users.yaml
  5.   +9 -0     clickhouse/templates/statefulset.yaml
  6.   +113 -0   clickhouse/values.yaml
  7.   +20 -0    clickhouse_etc/conf.d/00_default_overrides.xml
  8.   +787 -0   clickhouse_etc/config.xml
  9.   +102 -0   clickhouse_etc/users.xml
  10.  +24 -6    deepflow/deepflow-otel-spring-demo.yaml
  11.  +48 -0    grafana/templates/quota.yml
  12.  +7 -6     grafana/values.yaml
  13.  +21 -0    kafka/helm/kafka/.helmignore
  14.  +9 -0     kafka/helm/kafka/Chart.lock
  15.  +33 -0    kafka/helm/kafka/Chart.yaml
  16.  +1073 -0  kafka/helm/kafka/README.md
  17.  +22 -0    kafka/helm/kafka/charts/common/.helmignore
  18.  +23 -0    kafka/helm/kafka/charts/common/Chart.yaml
  19.  +235 -0   kafka/helm/kafka/charts/common/README.md
  20.  +106 -0   kafka/helm/kafka/charts/common/templates/_affinities.tpl
  21.  +180 -0   kafka/helm/kafka/charts/common/templates/_capabilities.tpl
  22.  +23 -0    kafka/helm/kafka/charts/common/templates/_errors.tpl
  23.  +80 -0    kafka/helm/kafka/charts/common/templates/_images.tpl
  24.  +68 -0    kafka/helm/kafka/charts/common/templates/_ingress.tpl
  25.  +18 -0    kafka/helm/kafka/charts/common/templates/_labels.tpl
  26.  +66 -0    kafka/helm/kafka/charts/common/templates/_names.tpl
  27.  +165 -0   kafka/helm/kafka/charts/common/templates/_secrets.tpl
  28.  +23 -0    kafka/helm/kafka/charts/common/templates/_storage.tpl
  29.  +13 -0    kafka/helm/kafka/charts/common/templates/_tplvalues.tpl
  30.  +62 -0    kafka/helm/kafka/charts/common/templates/_utils.tpl
  31.  +14 -0    kafka/helm/kafka/charts/common/templates/_warnings.tpl
  32.  +72 -0    kafka/helm/kafka/charts/common/templates/validations/_cassandra.tpl
  33.  +103 -0   kafka/helm/kafka/charts/common/templates/validations/_mariadb.tpl
  34.  +108 -0   kafka/helm/kafka/charts/common/templates/validations/_mongodb.tpl
  35.  +103 -0   kafka/helm/kafka/charts/common/templates/validations/_mysql.tpl
  36.  +129 -0   kafka/helm/kafka/charts/common/templates/validations/_postgresql.tpl
  37.  +76 -0    kafka/helm/kafka/charts/common/templates/validations/_redis.tpl
  38.  +46 -0    kafka/helm/kafka/charts/common/templates/validations/_validations.tpl
  39.  +5 -0     kafka/helm/kafka/charts/common/values.yaml
  40.  +21 -0    kafka/helm/kafka/charts/zookeeper/.helmignore
  41.  +6 -0     kafka/helm/kafka/charts/zookeeper/Chart.lock
  42.  +24 -0    kafka/helm/kafka/charts/zookeeper/Chart.yaml
  43.  +522 -0   kafka/helm/kafka/charts/zookeeper/README.md
  44.  +22 -0    kafka/helm/kafka/charts/zookeeper/charts/common/.helmignore
  45.  +23 -0    kafka/helm/kafka/charts/zookeeper/charts/common/Chart.yaml
  46.  +235 -0   kafka/helm/kafka/charts/zookeeper/charts/common/README.md
  47.  +106 -0   kafka/helm/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl
  48.  +180 -0   kafka/helm/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl
  49.  +23 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
  50.  +80 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_images.tpl
  51.  +68 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl
  52.  +18 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_labels.tpl
  53.  +66 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_names.tpl
  54.  +165 -0   kafka/helm/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl
  55.  +23 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_storage.tpl
  56.  +13 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl
  57.  +62 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_utils.tpl
  58.  +14 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl
  59.  +72 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl
  60.  +103 -0   kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl
  61.  +108 -0   kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl
  62.  +103 -0   kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl
  63.  +129 -0   kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl
  64.  +76 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl
  65.  +46 -0    kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl
  66.  +5 -0     kafka/helm/kafka/charts/zookeeper/charts/common/values.yaml
  67.  +76 -0    kafka/helm/kafka/charts/zookeeper/templates/NOTES.txt
  68.  +361 -0   kafka/helm/kafka/charts/zookeeper/templates/_helpers.tpl
  69.  +17 -0    kafka/helm/kafka/charts/zookeeper/templates/configmap.yaml
  70.  +4 -0     kafka/helm/kafka/charts/zookeeper/templates/extra-list.yaml
  71.  +29 -0    kafka/helm/kafka/charts/zookeeper/templates/metrics-svc.yaml
  72.  +41 -0    kafka/helm/kafka/charts/zookeeper/templates/networkpolicy.yaml
  73.  +26 -0    kafka/helm/kafka/charts/zookeeper/templates/pdb.yaml
  74.  +27 -0    kafka/helm/kafka/charts/zookeeper/templates/prometheusrule.yaml
  75.  +102 -0   kafka/helm/kafka/charts/zookeeper/templates/scripts-configmap.yaml
  76.  +77 -0    kafka/helm/kafka/charts/zookeeper/templates/secrets.yaml
  77.  +21 -0    kafka/helm/kafka/charts/zookeeper/templates/serviceaccount.yaml
  78.  +53 -0    kafka/helm/kafka/charts/zookeeper/templates/servicemonitor.yaml
  79.  +532 -0   kafka/helm/kafka/charts/zookeeper/templates/statefulset.yaml
  80.  +42 -0    kafka/helm/kafka/charts/zookeeper/templates/svc-headless.yaml
  81.  +71 -0    kafka/helm/kafka/charts/zookeeper/templates/svc.yaml
  82.  +57 -0    kafka/helm/kafka/charts/zookeeper/templates/tls-secrets.yaml
  83.  +879 -0   kafka/helm/kafka/charts/zookeeper/values.yaml
  84.  +314 -0   kafka/helm/kafka/templates/NOTES.txt
  85.  +555 -0   kafka/helm/kafka/templates/_helpers.tpl
  86.  +17 -0    kafka/helm/kafka/templates/configmap.yaml
  87.  +4 -0     kafka/helm/kafka/templates/extra-list.yaml
  88.  +95 -0    kafka/helm/kafka/templates/jaas-secret.yaml
  89.  +68 -0    kafka/helm/kafka/templates/jmx-configmap.yaml
  90.  +34 -0    kafka/helm/kafka/templates/jmx-metrics-svc.yaml
  91.  +171 -0   kafka/helm/kafka/templates/kafka-metrics-deployment.yaml
  92.  +16 -0    kafka/helm/kafka/templates/kafka-metrics-serviceaccount.yaml
  93.  +34 -0    kafka/helm/kafka/templates/kafka-metrics-svc.yaml
  94.  +19 -0    kafka/helm/kafka/templates/kafka-provisioning-secret.yaml
  95.  +15 -0    kafka/helm/kafka/templates/kafka-provisioning-serviceaccount.yaml
  96.  +265 -0   kafka/helm/kafka/templates/kafka-provisioning.yaml
  97.  +17 -0    kafka/helm/kafka/templates/log4j-configmap.yaml
  98.  +22 -0    kafka/helm/kafka/templates/networkpolicy-egress.yaml
  99.  +53 -0    kafka/helm/kafka/templates/networkpolicy-ingress.yaml
  100. +26 -0    kafka/helm/kafka/templates/poddisruptionbudget.yaml

+20 -0  clickhouse/clickhouse_etc/conf.d/00_default_overrides.xml

@@ -0,0 +1,20 @@
+<clickhouse>
+  <!-- Macros -->
+  <macros>
+    <shard from_env="CLICKHOUSE_SHARD_ID"></shard>
+    <replica from_env="CLICKHOUSE_REPLICA_ID"></replica>
+    <layer>clickhouse</layer>
+  </macros>
+  <!-- Log Level -->
+  <logger>
+    <level>information</level>
+  </logger>
+  <!-- Zookeeper configuration -->
+  <zookeeper>
+    
+    <node>
+      <host from_env="KEEPER_NODE_0"></host>
+      <port>2181</port>
+    </node>
+  </zookeeper>
+</clickhouse>
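
The macros and the Keeper host in this override file are resolved from the container environment at startup via the from_env attributes. Below is a minimal sketch of how the chart's StatefulSet could inject those variables; the variable names come from the file above, while the pod and service names and the value sources are assumptions, since the statefulset.yaml diff is not shown on this page.

    # Hypothetical container env for the ClickHouse StatefulSet (illustrative only)
    env:
      - name: CLICKHOUSE_SHARD_ID        # consumed by <shard from_env="CLICKHOUSE_SHARD_ID">
        value: "01"
      - name: CLICKHOUSE_REPLICA_ID      # consumed by <replica from_env="CLICKHOUSE_REPLICA_ID">
        valueFrom:
          fieldRef:
            fieldPath: metadata.name     # e.g. use the pod name as the replica id
      - name: KEEPER_NODE_0              # consumed by <host from_env="KEEPER_NODE_0">
        value: "clickhouse-keeper-0.clickhouse-keeper-headless"   # assumed Keeper service DNS name

With ClickHouse Keeper or ZooKeeper reachable at that address on port 2181, each replica registers itself under the shard/replica macros when Replicated tables are created.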

+787 -0  clickhouse/clickhouse_etc/config.xml

@@ -0,0 +1,787 @@
+<?xml version="1.0"?>
+<!--
+  NOTE: User and query level settings are set up in "users.xml" file.
+  If you have accidentally specified user-level settings here, the server won't start.
+  You can either move the settings to the right place inside "users.xml" file
+   or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
+-->
+<clickhouse><logger><!-- Possible levels [1]:
+
+          - none (turns off logging)
+          - fatal
+          - critical
+          - error
+          - warning
+          - notice
+          - information
+          - debug
+          - trace
+          - test (not for production usage)
+
+            [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
+        --><level>trace</level><!-- Rotation policy
+             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
+          --><size>1000M</size><count>10</count><!-- <console>1</console> --><!-- Default behavior is autodetection (log to console if not daemon mode and is tty) --><!-- Per level overrides (legacy):
+
+        For example to suppress logging of the ConfigReloader you can use:
+        NOTE: levels.logger is reserved, see below.
+        --><!--
+        <levels>
+          <ConfigReloader>none</ConfigReloader>
+        </levels>
+        --><!-- Per level overrides:
+
+        For example to suppress logging of the RBAC for default user you can use:
+        (But please note that the logger name may change from version to version, even after a minor upgrade)
+        --><!--
+        <levels>
+          <logger>
+            <name>ContextAccess (default)</name>
+            <level>none</level>
+          </logger>
+          <logger>
+            <name>DatabaseOrdinary (test)</name>
+            <level>none</level>
+          </logger>
+        </levels>
+        --><!-- Structured log formatting:
+        You can specify the log format (for now, JSON only). In that case, the console log will be printed
+        in the specified format, like JSON.
+        For example, as below:
+        {"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
+        To enable JSON logging support, please uncomment the entire <formatting> tag below.
+
+        a) You can modify key names by changing values under tag values inside <names> tag.
+            For example, to change DATE_TIME to MY_DATE_TIME, you can do it like this:
+            <date_time>MY_DATE_TIME</date_time>
+        b) You can stop unwanted log properties from appearing in logs. To do so, you can simply comment out (recommended)
+        that property from this file.
+        For example, if you do not want your log to print query_id, you can comment out only <query_id> tag.
+        However, if you comment out all the tags under <names>, the program will print the default values as
+        below.
+        --><!-- <formatting>
+            <type>json</type>
+            <names>
+                <date_time>date_time</date_time>
+                <thread_name>thread_name</thread_name>
+                <thread_id>thread_id</thread_id>
+                <level>level</level>
+                <query_id>query_id</query_id>
+                <logger_name>logger_name</logger_name>
+                <message>message</message>
+                <source_file>source_file</source_file>
+                <source_line>source_line</source_line>
+            </names>
+        </formatting> --><console>1</console></logger><!-- Add headers to response in options request. OPTIONS method is used in CORS preflight requests. --><!-- It is off by default. The following headers are required for CORS.--><!-- http_options_response>
+        <header>
+            <name>Access-Control-Allow-Origin</name>
+            <value>*</value>
+        </header>
+        <header>
+            <name>Access-Control-Allow-Headers</name>
+            <value>origin, x-requested-with</value>
+        </header>
+        <header>
+            <name>Access-Control-Allow-Methods</name>
+            <value>POST, GET, OPTIONS</value>
+        </header>
+        <header>
+            <name>Access-Control-Max-Age</name>
+            <value>86400</value>
+        </header>
+    </http_options_response --><!-- It is the name that will be shown in the clickhouse-client.
+         By default, anything with "production" will be highlighted in red in query prompt.
+    --><!--display_name>production</display_name--><!-- Port for HTTP API. See also 'https_port' for secure connections.
+         This interface is also used by ODBC and JDBC drivers (DataGrip, Dbeaver, ...)
+         and by most of web interfaces (embedded UI, Grafana, Redash, ...).
+      --><!-- Port for interaction by native protocol with:
+         - clickhouse-client and other native ClickHouse tools (clickhouse-benchmark, clickhouse-copier);
+         - clickhouse-server with other clickhouse-servers for distributed query processing;
+         - ClickHouse drivers and applications supporting native protocol
+         (this protocol is also informally called "the TCP protocol");
+         See also 'tcp_port_secure' for secure connections.
+    --><!-- Compatibility with MySQL protocol.
+         ClickHouse will pretend to be MySQL for applications connecting to this port.
+    --><!-- Compatibility with PostgreSQL protocol.
+         ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
+    --><!-- HTTP API with TLS (HTTPS).
+         You have to configure certificate to enable this interface.
+         See the openSSL section below.
+    --><!-- <https_port>8443</https_port> --><!-- Native interface with TLS.
+         You have to configure certificate to enable this interface.
+         See the openSSL section below.
+    --><!-- <tcp_port_secure>9440</tcp_port_secure> --><!-- Native interface wrapped with PROXYv1 protocol
+         PROXYv1 header sent for every connection.
+         ClickHouse will extract information about proxy-forwarded client address from the header.
+    --><!-- <tcp_with_proxy_port>9011</tcp_with_proxy_port> --><!-- Port for communication between replicas. Used for data exchange.
+         It provides low-level data access between servers.
+         This port should not be accessible from untrusted networks.
+         See also 'interserver_http_credentials'.
+         Data transferred over connections to this port should not go through untrusted networks.
+         See also 'interserver_https_port'.
+      --><!-- Port for communication between replicas with TLS.
+         You have to configure certificate to enable this interface.
+         See the openSSL section below.
+         See also 'interserver_http_credentials'.
+      --><!-- <interserver_https_port>9010</interserver_https_port> --><!-- Hostname that is used by other replicas to request this server.
+         If not specified, then it is determined analogously to the 'hostname -f' command.
+         This setting could be used to switch replication to another network interface
+         (the server may be connected to multiple networks via multiple addresses)
+      --><!--
+    <interserver_http_host>example.clickhouse.com</interserver_http_host>
+    --><!-- You can specify credentials for authentication between replicas.
+         This is required when interserver_https_port is accessible from untrusted networks,
+         and also recommended to avoid SSRF attacks from possibly compromised services in your network.
+      --><!--<interserver_http_credentials>
+        <user>interserver</user>
+        <password></password>
+    </interserver_http_credentials>--><!-- Listen specified address.
+         Use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere.
+         Notes:
+         If you open connections from wildcard address, make sure that at least one of the following measures applied:
+         - server is protected by firewall and not accessible from untrusted networks;
+         - all users are restricted to subset of network addresses (see users.xml);
+         - all users have strong passwords, only secure (TLS) interfaces are accessible, or connections are only made via TLS interfaces.
+         - users without password have readonly access.
+         See also: https://www.shodan.io/search?query=clickhouse
+      --><!-- <listen_host>::</listen_host> --><!-- Same for hosts without support for IPv6: --><!-- <listen_host>0.0.0.0</listen_host> --><!-- Default values - try listen localhost on IPv4 and IPv6. --><!--
+    <listen_host>::1</listen_host>
+    <listen_host>127.0.0.1</listen_host>
+    --><!-- <interserver_listen_host>::</interserver_listen_host> --><!-- Listen host for communication between replicas. Used for data exchange --><!-- Default values - equal to listen_host --><!-- Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen. --><!-- <listen_try>0</listen_try> --><!-- Allow multiple servers to listen on the same address:port. This is not recommended.
+      --><!-- <listen_reuse_port>0</listen_reuse_port> --><!-- <listen_backlog>4096</listen_backlog> --><max_connections>4096</max_connections><!-- For 'Connection: keep-alive' in HTTP 1.1 --><keep_alive_timeout>3</keep_alive_timeout><!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) --><!-- <grpc_port>9100</grpc_port> --><grpc><enable_ssl>false</enable_ssl><!-- The following two files are used only if enable_ssl=1 --><ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file><ssl_key_file>/path/to/ssl_key_file</ssl_key_file><!-- Whether server will request client for a certificate --><ssl_require_client_auth>false</ssl_require_client_auth><!-- The following file is used only if ssl_require_client_auth=1 --><ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file><!-- Default transport compression type (can be overridden by client, see the transport_compression_type field in QueryInfo).
+             Supported algorithms: none, deflate, gzip, stream_gzip --><transport_compression_type>none</transport_compression_type><!-- Default transport compression level. Supported levels: 0..3 --><transport_compression_level>0</transport_compression_level><!-- Send/receive message size limits in bytes. -1 means unlimited --><max_send_message_size>-1</max_send_message_size><max_receive_message_size>-1</max_receive_message_size><!-- Enable if you want very detailed logs --><verbose_logs>false</verbose_logs></grpc><!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 --><openSSL><server><!-- Used for https server AND secure tcp port --><!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt --><!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
+            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> --><!-- dhparams are optional. You can delete the <dhParamsFile> element.
+                 To generate dhparams, use the following command:
+                  openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
+                 Only file format with BEGIN DH PARAMETERS is supported.
+              --><!-- <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>--><verificationMode>none</verificationMode><loadDefaultCAFile>true</loadDefaultCAFile><cacheSessions>true</cacheSessions><disableProtocols>sslv2,sslv3</disableProtocols><preferServerCiphers>true</preferServerCiphers></server><client><!-- Used for connecting to https dictionary source and secured Zookeeper communication --><loadDefaultCAFile>true</loadDefaultCAFile><cacheSessions>true</cacheSessions><disableProtocols>sslv2,sslv3</disableProtocols><preferServerCiphers>true</preferServerCiphers><!-- Use for self-signed: <verificationMode>none</verificationMode> --><invalidCertificateHandler><!-- Use for self-signed: <name>AcceptCertificateHandler</name> --><name>RejectCertificateHandler</name></invalidCertificateHandler></client></openSSL><!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 --><!--
+    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+    --><!-- The maximum number of query processing threads, excluding threads for retrieving data from remote servers, allowed to run all queries.
+         This is not a hard limit. If the limit is reached, the query will still get at least one thread to run.
+         A query can scale up to the desired number of threads during execution if more threads become available.
+    --><concurrent_threads_soft_limit_num>0</concurrent_threads_soft_limit_num><concurrent_threads_soft_limit_ratio_to_cores>0</concurrent_threads_soft_limit_ratio_to_cores><!-- Maximum number of concurrent queries. --><max_concurrent_queries>100</max_concurrent_queries><!-- Maximum memory usage (resident set size) for server process.
+         Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
+         If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.
+
+         The constraint is checked on query execution time.
+         If a query tries to allocate memory and the current memory usage plus allocation is greater
+          than specified threshold, exception will be thrown.
+
+         It is not practical to set this constraint to small values like just a few gigabytes,
+          because memory allocator will keep this amount of memory in caches and the server will deny service of queries.
+      --><max_server_memory_usage>0</max_server_memory_usage><!-- Maximum number of threads in the Global thread pool.
+    This will default to a maximum of 10000 threads if not specified.
+    This setting will be useful in scenarios where there are a large number
+    of distributed queries that are running concurrently but are idling most
+    of the time, in which case a higher number of threads might be required.
+    --><max_thread_pool_size>10000</max_thread_pool_size><!-- Configure other thread pools: --><!--
+    <background_buffer_flush_schedule_pool_size>16</background_buffer_flush_schedule_pool_size>
+    <background_pool_size>16</background_pool_size>
+    <background_merges_mutations_concurrency_ratio>2</background_merges_mutations_concurrency_ratio>
+    <background_merges_mutations_scheduling_policy>round_robin</background_merges_mutations_scheduling_policy>
+    <background_move_pool_size>8</background_move_pool_size>
+    <background_fetches_pool_size>8</background_fetches_pool_size>
+    <background_common_pool_size>8</background_common_pool_size>
+    <background_schedule_pool_size>128</background_schedule_pool_size>
+    <background_message_broker_schedule_pool_size>16</background_message_broker_schedule_pool_size>
+    <background_distributed_schedule_pool_size>16</background_distributed_schedule_pool_size>
+    --><!-- On memory constrained environments you may have to set this to value larger than 1.
+      --><max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio><!-- Simple server-wide memory profiler. Collect a stack trace at every peak allocation step (in bytes).
+         Data will be stored in system.trace_log table with query_id = empty string.
+         Zero means disabled.
+      --><total_memory_profiler_step>4194304</total_memory_profiler_step><!-- Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type.
+         The probability is for every alloc/free regardless of the size of the allocation.
+         Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
+          which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
+         You may want to set 'total_memory_profiler_step' to 1 for extra fine grained sampling.
+      --><total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability><!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
+         correct maximum value. --><!-- <max_open_files>262144</max_open_files> --><!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
+         In bytes. Cache is single for server. Memory is allocated only on demand.
+         Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
+         Uncompressed cache is advantageous only for very short queries and in rare cases.
+
+         Note: uncompressed cache can be pointless for lz4, because memory bandwidth
+         is slower than multi-core decompression on some server configurations.
+         Enabling it can sometimes paradoxically make queries slower.
+      --><uncompressed_cache_size>8589934592</uncompressed_cache_size><!-- Approximate size of mark cache, used in tables of MergeTree family.
+         In bytes. Cache is single for server. Memory is allocated only on demand.
+         You should not lower this value.
+      --><mark_cache_size>5368709120</mark_cache_size><!-- If you enable the `min_bytes_to_use_mmap_io` setting,
+         the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace.
+         It makes sense only for large files and helps only if data reside in page cache.
+         To avoid frequent open/mmap/munmap/close calls (which are very expensive due to consequent page faults)
+         and to reuse mappings from several threads and queries,
+         the cache of mapped files is maintained. Its size is the number of mapped regions (usually equal to the number of mapped files).
+         The amount of data in mapped files can be monitored
+         in system.metrics, system.metric_log by the MMappedFiles, MMappedFileBytes metrics
+         and in system.asynchronous_metrics, system.asynchronous_metrics_log by the MMapCacheCells metric,
+         and also in system.events, system.processes, system.query_log, system.query_thread_log, system.query_views_log by the
+         CreatedReadBufferMMap, CreatedReadBufferMMapFailed, MMappedFileCacheHits, MMappedFileCacheMisses events.
+         Note that the amount of data in mapped files does not consume memory directly and is not accounted
+         in query or server memory usage - because this memory can be discarded similar to OS page cache.
+         The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree,
+         also it can be dropped manually by the SYSTEM DROP MMAP CACHE query.
+      --><mmap_cache_size>1000</mmap_cache_size><!-- Cache size in bytes for compiled expressions.--><compiled_expression_cache_size>134217728</compiled_expression_cache_size><!-- Cache size in elements for compiled expressions.--><compiled_expression_cache_elements_size>10000</compiled_expression_cache_elements_size><!-- Path to data directory, with trailing slash. --><path>/bitnami/clickhouse/data</path><!-- Multi-disk configuration example: --><!--
+    <storage_configuration>
+        <disks>
+            <default>
+                <keep_free_space_bytes>0</keep_free_space_bytes>
+            </default>
+            <data>
+                <path>/data/</path>
+                <keep_free_space_bytes>0</keep_free_space_bytes>
+            </data>
+            <s3>
+                <type>s3</type>
+                <endpoint>http://path/to/endpoint</endpoint>
+                <access_key_id>your_access_key_id</access_key_id>
+                <secret_access_key>your_secret_access_key</secret_access_key>
+            </s3>
+            <blob_storage_disk>
+                <type>azure_blob_storage</type>
+                <storage_account_url>http://account.blob.core.windows.net</storage_account_url>
+                <container_name>container</container_name>
+                <account_name>account</account_name>
+                <account_key>pass123</account_key>
+                <metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
+                <cache_enabled>true</cache_enabled>
+                <cache_path>/var/lib/clickhouse/disks/blob_storage_disk/cache/</cache_path>
+                <skip_access_check>false</skip_access_check>
+            </blob_storage_disk>
+        </disks>
+
+        <policies>
+            <all>
+                <volumes>
+                    <main>
+                        <disk>default</disk>
+                        <disk>data</disk>
+                        <disk>s3</disk>
+                        <disk>blob_storage_disk</disk>
+
+                        <max_data_part_size_bytes></max_data_part_size_bytes>
+                        <max_data_part_size_ratio></max_data_part_size_ratio>
+                        <perform_ttl_move_on_insert>true</perform_ttl_move_on_insert>
+                        <prefer_not_to_merge>false</prefer_not_to_merge>
+                        <load_balancing>round_robin</load_balancing>
+                    </main>
+                </volumes>
+                <move_factor>0.2</move_factor>
+            </all>
+        </policies>
+    </storage_configuration>
+    --><!-- Path to temporary data for processing hard queries. --><tmp_path>/var/lib/clickhouse/tmp/</tmp_path><!-- Disable AuthType plaintext_password and no_password for ACL. --><allow_plaintext_password>1</allow_plaintext_password><allow_no_password>1</allow_no_password><allow_implicit_no_password>1</allow_implicit_no_password><!-- Complexity requirements for user passwords. --><!-- <password_complexity>
+        <rule>
+            <pattern>.{12}</pattern>
+            <message>be at least 12 characters long</message>
+        </rule>
+        <rule>
+            <pattern>\p{N}</pattern>
+            <message>contain at least 1 numeric character</message>
+        </rule>
+        <rule>
+            <pattern>\p{Ll}</pattern>
+            <message>contain at least 1 lowercase character</message>
+        </rule>
+        <rule>
+            <pattern>\p{Lu}</pattern>
+            <message>contain at least 1 uppercase character</message>
+        </rule>
+        <rule>
+            <pattern>[^\p{L}\p{N}]</pattern>
+            <message>contain at least 1 special character</message>
+        </rule>
+    </password_complexity> --><!-- Policy from the <storage_configuration> for the temporary files.
+         If not set <tmp_path> is used, otherwise <tmp_path> is ignored.
+
+         Notes:
+         - move_factor              is ignored
+         - keep_free_space_bytes    is ignored
+         - max_data_part_size_bytes is ignored
+         - you must have exactly one volume in that policy
+    --><!-- <tmp_policy>tmp</tmp_policy> --><!-- Directory with user provided files that are accessible by 'file' table function. --><user_files_path>/var/lib/clickhouse/user_files/</user_files_path><!-- LDAP server definitions. --><ldap_servers><!-- List LDAP servers with their connection parameters here to later 1) use them as authenticators for dedicated local users,
+              who have 'ldap' authentication mechanism specified instead of 'password', or to 2) use them as remote user directories.
+             Parameters:
+                host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
+                port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
+                bind_dn - template used to construct the DN to bind to.
+                        The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual
+                         user name during each authentication attempt.
+                user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user.
+                        This is mainly used in search filters for further role mapping when the server is Active Directory. The
+                         resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default,
+                         user DN is set equal to bind DN, but once search is performed, it will be updated to the actual detected
+                         user DN value.
+                    base_dn - template used to construct the base DN for the LDAP search.
+                            The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings
+                             of the template with the actual user name and bind DN during the LDAP search.
+                    scope - scope of the LDAP search.
+                            Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
+                    search_filter - template used to construct the search filter for the LDAP search.
+                            The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}'
+                             substrings of the template with the actual user name, bind DN, and base DN during the LDAP search.
+                            Note that special characters must be escaped properly in XML.
+                verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed
+                         to be successfully authenticated for all consecutive requests without contacting the LDAP server.
+                        Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request.
+                enable_tls - flag to trigger use of secure connection to the LDAP server.
+                        Specify 'no' for plain text (ldap://) protocol (not recommended).
+                        Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
+                        Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
+                tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
+                        Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
+                tls_require_cert - SSL/TLS peer certificate verification behavior.
+                        Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
+                tls_cert_file - path to certificate file.
+                tls_key_file - path to certificate key file.
+                tls_ca_cert_file - path to CA certificate file.
+                tls_ca_cert_dir - path to the directory containing CA certificates.
+                tls_cipher_suite - allowed cipher suite (in OpenSSL notation).
+             Example:
+                <my_ldap_server>
+                    <host>localhost</host>
+                    <port>636</port>
+                    <bind_dn>uid={user_name},ou=users,dc=example,dc=com</bind_dn>
+                    <verification_cooldown>300</verification_cooldown>
+                    <enable_tls>yes</enable_tls>
+                    <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
+                    <tls_require_cert>demand</tls_require_cert>
+                    <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
+                    <tls_key_file>/path/to/tls_key_file</tls_key_file>
+                    <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
+                    <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
+                    <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
+                </my_ldap_server>
+             Example (typical Active Directory with configured user DN detection for further role mapping):
+                <my_ad_server>
+                    <host>localhost</host>
+                    <port>389</port>
+                    <bind_dn>EXAMPLE\{user_name}</bind_dn>
+                    <user_dn_detection>
+                        <base_dn>CN=Users,DC=example,DC=com</base_dn>
+                        <search_filter>(&amp;(objectClass=user)(sAMAccountName={user_name}))</search_filter>
+                    </user_dn_detection>
+                    <enable_tls>no</enable_tls>
+                </my_ad_server>
+        --></ldap_servers><!-- To enable Kerberos authentication support for HTTP requests (GSS-SPNEGO), for those users who are explicitly configured
+          to authenticate via Kerberos, define a single 'kerberos' section here.
+         Parameters:
+            principal - canonical service principal name, that will be acquired and used when accepting security contexts.
+                    This parameter is optional, if omitted, the default principal will be used.
+                    This parameter cannot be specified together with 'realm' parameter.
+            realm - a realm, that will be used to restrict authentication to only those requests whose initiator's realm matches it.
+                    This parameter is optional, if omitted, no additional filtering by realm will be applied.
+                    This parameter cannot be specified together with 'principal' parameter.
+         Example:
+            <kerberos />
+         Example:
+            <kerberos>
+                <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
+            </kerberos>
+         Example:
+            <kerberos>
+                <realm>EXAMPLE.COM</realm>
+            </kerberos>
+    --><!-- Sources to read users, roles, access rights, profiles of settings, quotas. --><user_directories><users_xml><!-- Path to configuration file with predefined users. --><path>users.xml</path></users_xml><local_directory><!-- Path to folder where users created by SQL commands are stored. --><path>/var/lib/clickhouse/access/</path></local_directory><!-- To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
+              with the following parameters:
+                server - one of LDAP server names defined in 'ldap_servers' config section above.
+                        This parameter is mandatory and cannot be empty.
+                roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
+                        If no roles are specified here or assigned during role mapping (below), user will not be able to perform any
+                         actions after authentication.
+                role_mapping - section with LDAP search parameters and mapping rules.
+                        When a user authenticates, while still bound to LDAP, an LDAP search is performed using search_filter and the
+                         name of the logged in user. For each entry found during that search, the value of the specified attribute is
+                         extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the
+                         value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by
+                         CREATE ROLE command.
+                        There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be
+                         applied.
+                    base_dn - template used to construct the base DN for the LDAP search.
+                            The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}'
+                             substrings of the template with the actual user name, bind DN, and user DN during each LDAP search.
+                    scope - scope of the LDAP search.
+                            Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
+                    search_filter - template used to construct the search filter for the LDAP search.
+                            The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and
+                             '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during
+                             each LDAP search.
+                            Note that special characters must be escaped properly in XML.
+                    attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default.
+                    prefix - prefix, that will be expected to be in front of each string in the original list of strings returned by
+                             the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated
+                             as local role names. Empty, by default.
+             Example:
+                <ldap>
+                    <server>my_ldap_server</server>
+                    <roles>
+                        <my_local_role1 />
+                        <my_local_role2 />
+                    </roles>
+                    <role_mapping>
+                        <base_dn>ou=groups,dc=example,dc=com</base_dn>
+                        <scope>subtree</scope>
+                        <search_filter>(&amp;(objectClass=groupOfNames)(member={bind_dn}))</search_filter>
+                        <attribute>cn</attribute>
+                        <prefix>clickhouse_</prefix>
+                    </role_mapping>
+                </ldap>
+             Example (typical Active Directory with role mapping that relies on the detected user DN):
+                <ldap>
+                    <server>my_ad_server</server>
+                    <role_mapping>
+                        <base_dn>CN=Users,DC=example,DC=com</base_dn>
+                        <attribute>CN</attribute>
+                        <scope>subtree</scope>
+                        <search_filter>(&amp;(objectClass=group)(member={user_dn}))</search_filter>
+                        <prefix>clickhouse_</prefix>
+                    </role_mapping>
+                </ldap>
+        --></user_directories><access_control_improvements><!-- Enables logic that users without permissive row policies can still read rows using a SELECT query.
+             For example, if there are two users, A and B, and a row policy is defined only for A, then
+             if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows.
+             By default this setting is false for compatibility with earlier access configurations. --><users_without_row_policies_can_read_rows>false</users_without_row_policies_can_read_rows><!-- By default, for backward compatibility ON CLUSTER queries ignore CLUSTER grant,
+             however you can change this behaviour by setting this to true --><on_cluster_queries_require_cluster_grant>false</on_cluster_queries_require_cluster_grant><!-- By default, for backward compatibility "SELECT * FROM system.<table>" doesn't require any grants and can be executed
+             by any user. You can change this behaviour by setting this to true.
+             If it's set to true then this query requires "GRANT SELECT ON system.<table>" just like as for non-system tables.
+             Exceptions: a few system tables ("tables", "columns", "databases", and some constant tables like "one", "contributors")
+             are still accessible for everyone; and if there is a SHOW privilege (e.g. "SHOW USERS") granted the corresponding system
+             table (i.e. "system.users") will be accessible. --><select_from_system_db_requires_grant>false</select_from_system_db_requires_grant><!-- By default, for backward compatibility "SELECT * FROM information_schema.<table>" doesn't require any grants and can be
+             executed by any user. You can change this behaviour by setting this to true.
+             If it's set to true then this query requires "GRANT SELECT ON information_schema.<table>" just like as for ordinary tables. --><select_from_information_schema_requires_grant>false</select_from_information_schema_requires_grant><!-- By default, for backward compatibility a settings profile constraint for a specific setting inherit every not set field from
+             previous profile. You can change this behaviour by setting this to true.
+             If it's set to true and a settings profile has a constraint for a specific setting, then this constraint completely cancels all
+             actions of the previous constraint (defined in other profiles) for the same setting, including fields that are not set by the new constraint.
+             It also enables 'changeable_in_readonly' constraint type --><settings_constraints_replace_previous>false</settings_constraints_replace_previous><!-- Number of seconds since last access a role is stored in the Role Cache --><role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds></access_control_improvements><!-- Default profile of settings. --><default_profile>default</default_profile><!-- Comma-separated list of prefixes for user-defined settings. --><custom_settings_prefixes/><!-- System profile of settings. These settings are used by internal processes (Distributed DDL worker and so on). --><!-- <system_profile>default</system_profile> --><!-- Buffer profile of settings.
+         These settings are used by Buffer storage to flush data to the underlying table.
+         Default: used from system_profile directive.
+    --><!-- <buffer_profile>default</buffer_profile> --><!-- Default database. --><default_database>default</default_database><!-- Server time zone could be set here.
+
+         Time zone is used when converting between String and DateTime types,
+          when printing DateTime in text formats and parsing DateTime from text,
+          it is used in date and time related functions, if specific time zone was not passed as an argument.
+
+         Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
+         If not specified, system time zone at server startup is used.
+
+         Please note that the server may display a time zone alias instead of the specified name.
+         Example: Zulu is an alias for UTC.
+    --><!-- <timezone>UTC</timezone> --><!-- You can specify umask here (see "man umask"). Server will apply it on startup.
+         Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
+    --><!-- <umask>022</umask> --><!-- Perform mlockall after startup to lower first queries latency
+          and to prevent clickhouse executable from being paged out under high IO load.
+         Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
+    --><mlock_executable>true</mlock_executable><!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. --><remap_executable>false</remap_executable><![CDATA[
+         Uncomment below in order to use JDBC table engine and function.
+
+         To install and run JDBC bridge in background:
+         * [Debian/Ubuntu]
+           export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
+           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+           apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+           clickhouse-jdbc-bridge &
+
+         * [CentOS/RHEL]
+           export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
+           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+           yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+           clickhouse-jdbc-bridge &
+
+         Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
+    ]]><!--
+    <jdbc_bridge>
+        <host>127.0.0.1</host>
+        <port>9019</port>
+    </jdbc_bridge>
+    --><!-- Configuration of clusters that could be used in Distributed tables.
+         https://clickhouse.com/docs/en/operations/table_engines/distributed/
+      --><!-- The list of hosts allowed to use in URL-related storage engines and table functions.
+        If this section is not present in configuration, all hosts are allowed.
+    --><!--<remote_url_allow_hosts>--><!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
+            Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
+                    If port is explicitly specified in URL, the host:port is checked as a whole.
+                    If host specified here without port, any port with this host allowed.
+                    "clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. is allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
+            If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
+            If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
+            Host should be specified using the host xml tag:
+                    <host>clickhouse.com</host>
+        --><!-- Regular expression can be specified. RE2 engine is used for regexps.
+            Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
+            (forgetting to do so is a common source of error).
+        --><!--</remote_url_allow_hosts>--><!-- If an element has an 'incl' attribute, the corresponding substitution from another file will be used as its value.
+         By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
+         Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
+      --><!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
+         Optional. If you don't use replicated tables, you could omit that.
+
+         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
+      --><!--
+    <zookeeper>
+        <node>
+            <host>example1</host>
+            <port>2181</port>
+        </node>
+        <node>
+            <host>example2</host>
+            <port>2181</port>
+        </node>
+        <node>
+            <host>example3</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+    --><!-- Substitutions for parameters of replicated tables.
+          Optional. If you don't use replicated tables, you could omit that.
+
+         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
+      --><!--
+    <macros>
+        <shard>01</shard>
+        <replica>example01-01-1</replica>
+    </macros>
+    --><!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. --><builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval><!-- Maximum session timeout, in seconds. Default: 3600. --><max_session_timeout>3600</max_session_timeout><!-- Default session timeout, in seconds. Default: 60. --><default_session_timeout>60</default_session_timeout><!-- Sending data to Graphite for monitoring. Several sections can be defined. --><!--
+        interval - send every X second
+        root_path - prefix for keys
+        hostname_in_path - append hostname to root_path (default = true)
+        metrics - send data from table system.metrics
+        events - send data from table system.events
+        asynchronous_metrics - send data from table system.asynchronous_metrics
+    --><!--
+    <graphite>
+        <host>localhost</host>
+        <port>42000</port>
+        <timeout>0.1</timeout>
+        <interval>60</interval>
+        <root_path>one_min</root_path>
+        <hostname_in_path>true</hostname_in_path>
+
+        <metrics>true</metrics>
+        <events>true</events>
+        <events_cumulative>false</events_cumulative>
+        <asynchronous_metrics>true</asynchronous_metrics>
+    </graphite>
+    <graphite>
+        <host>localhost</host>
+        <port>42000</port>
+        <timeout>0.1</timeout>
+        <interval>1</interval>
+        <root_path>one_sec</root_path>
+
+        <metrics>true</metrics>
+        <events>true</events>
+        <events_cumulative>false</events_cumulative>
+        <asynchronous_metrics>false</asynchronous_metrics>
+    </graphite>
+    --><!-- Serve endpoint for Prometheus monitoring. --><!--
+        endpoint - metrics path (relative to root, starting with "/")
+        port - port to set up the server on. If not defined or 0, then http_port is used
+        metrics - send data from table system.metrics
+        events - send data from table system.events
+        asynchronous_metrics - send data from table system.asynchronous_metrics
+        status_info - send data from different components of CH, e.g. Dictionaries status
+    --><!--
+    <prometheus>
+        <endpoint>/metrics</endpoint>
+        <port>9363</port>
+
+        <metrics>true</metrics>
+        <events>true</events>
+        <asynchronous_metrics>true</asynchronous_metrics>
+        <status_info>true</status_info>
+    </prometheus>
+    --><!-- Query log. Used only for queries with setting log_queries = 1. --><query_log><!-- Which table to insert data into. If the table does not exist, it will be created.
+             When the query log structure is changed after a system update,
+              then the old table will be renamed and a new table will be created automatically.
+        --><database>system</database><table>query_log</table><!--
+            PARTITION BY expr: https://clickhouse.com/docs/en/table_engines/mergetree-family/custom_partitioning_key/
+            Example:
+                event_date
+                toMonday(event_date)
+                toYYYYMM(event_date)
+                toStartOfHour(event_time)
+        --><partition_by>toYYYYMM(event_date)</partition_by><!--
+            Table TTL specification: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl
+            Example:
+                event_date + INTERVAL 1 WEEK
+                event_date + INTERVAL 7 DAY DELETE
+                event_date + INTERVAL 2 WEEK TO DISK 'bbb'
+
+        <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
+        -->
+        <!-- Instead of partition_by, you can provide a full engine expression (starting with ENGINE = ) with parameters.
+             Example: <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
+        -->
+        <!-- Interval of flushing data. -->
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <!-- Example of using a different storage policy for a system table -->
+        <!-- storage_policy>local_ssd</storage_policy -->
+    </query_log>
+    <!-- Trace log. Stores stack traces collected by query profilers.
+         See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
+    <trace_log>
+        <database>system</database>
+        <table>trace_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </trace_log>
+    <!-- Query thread log. Has information about all threads that participated in query execution.
+         Used only for queries with setting log_query_threads = 1. -->
+    <query_thread_log>
+        <database>system</database>
+        <table>query_thread_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </query_thread_log>
+    <!-- Query views log. Has information about all dependent views associated with a query.
+         Used only for queries with setting log_query_views = 1. -->
+    <query_views_log>
+        <database>system</database>
+        <table>query_views_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </query_views_log>
+    <!-- Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads). -->
+    <part_log>
+        <database>system</database>
+        <table>part_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </part_log>
+    <!-- Uncomment to write the text log into a table.
+         The text log contains all the information from the usual server log but stores it in a structured and efficient way.
+         The level of the messages that go to the table can be limited (<level>); if not specified, all messages will go to the table.
+    <text_log>
+        <database>system</database>
+        <table>text_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <level></level>
+    </text_log>
+    -->
+    <!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
+    <metric_log>
+        <database>system</database>
+        <table>metric_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+    </metric_log>
+    <!--
+        Asynchronous metric log contains values of metrics from
+        system.asynchronous_metrics.
+    -->
+    <asynchronous_metric_log>
+        <database>system</database>
+        <table>asynchronous_metric_log</table>
+        <flush_interval_milliseconds>7000</flush_interval_milliseconds>
+    </asynchronous_metric_log>
+    <!--
+        OpenTelemetry log contains OpenTelemetry trace spans.
+    -->
+    <opentelemetry_span_log>
+        <!--
+            The default table creation code is insufficient, this <engine> spec
+            is a workaround. There is no 'event_time' for this log, but two times,
+            start and finish. It is sorted by finish time, to avoid inserting
+            data too far away in the past (probably we can sometimes insert a span
+            that is seconds earlier than the last span in the table, due to a race
+            between several spans inserted in parallel). This gives the spans a
+            global order that we can use to e.g. retry insertion into some external
+            system.
+        -->
+        <engine>
+            engine MergeTree
+            partition by toYYYYMM(finish_date)
+            order by (finish_date, finish_time_us, trace_id)
+        </engine>
+        <database>system</database>
+        <table>opentelemetry_span_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </opentelemetry_span_log>
+    <!-- Crash log. Stores stack traces for fatal errors.
+         This table is normally empty. -->
+    <crash_log>
+        <database>system</database>
+        <table>crash_log</table>
+        <partition_by/>
+        <flush_interval_milliseconds>1000</flush_interval_milliseconds>
+    </crash_log>
+    <!-- Session log. Stores user log-in (successful or not) and log-out events.
+
+        Note: the session log has known security issues and should not be used in production.
+    -->
+    <!-- <session_log>
+        <database>system</database>
+        <table>session_log</table>
+
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </session_log> -->
+    <!-- Profiling on the Processors level. -->
+    <processors_profile_log>
+        <database>system</database>
+        <table>processors_profile_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </processors_profile_log>
+    <!-- Log of asynchronous inserts. It allows checking the status
+         of insert queries in fire-and-forget mode.
+    -->
+    <asynchronous_insert_log>
+        <database>system</database>
+        <table>asynchronous_insert_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <partition_by>event_date</partition_by>
+        <ttl>event_date + INTERVAL 3 DAY</ttl>
+    </asynchronous_insert_log>
+    <!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
+    <!-- Custom TLD lists.
+         Format: <name>/path/to/file</name>
+
+         Changes will not be applied w/o server restart.
+         Path to the list is under top_level_domains_path (see above).
+    -->
+    <top_level_domains_lists>
+        <!--
+        <public_suffix_list>/path/to/public_suffix_list.dat</public_suffix_list>
+        -->
+    </top_level_domains_lists>
+    <!-- Configuration of external dictionaries. See:
+         https://clickhouse.com/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts
+    -->
+    <dictionaries_config>*_dictionary.xml</dictionaries_config>
+    <!-- Configuration of user defined executable functions -->
+    <user_defined_executable_functions_config>*_function.xml</user_defined_executable_functions_config>
+    <!-- Path in ZooKeeper to store user-defined SQL functions created by the command CREATE FUNCTION.
+         If not specified, they will be stored locally. -->
+    <!-- <user_defined_zookeeper_path>/clickhouse/user_defined</user_defined_zookeeper_path> -->
+    <!-- Uncomment if you want data to be compressed 30-100% better.
+         Don't do that if you just started using ClickHouse.
+      -->
+    <!--
+    <compression>
+        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
+        <case>
+
+            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
+            <min_part_size>10000000000</min_part_size>        <!- - Min part size in bytes. - ->
+            <min_part_size_ratio>0.01</min_part_size_ratio>   <!- - Min size of part relative to whole table size. - ->
+
+            <!- - What compression method to use. - ->
+            <method>zstd</method>
+        </case>
+    </compression>
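+
+        Editor's note (sketch): after enabling a case like the one above, the
+        codec actually applied to new parts can be verified with (credentials
+        are an assumption):
+            clickhouse-client -q "SELECT table, default_compression_codec FROM system.parts WHERE active LIMIT 5"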
+    -->
+    <!-- Configuration of encryption. The server executes a command to
+         obtain an encryption key at startup if such a command is
+         defined; otherwise encryption codecs are disabled. The
+         command is executed through /bin/sh and is expected to write
+         a Base64-encoded key to stdout. -->
+    <encryption_codecs>
+        <!-- aes_128_gcm_siv -->
+            <!-- Example of getting a hex key from env -->
+            <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
+            <!--key_hex from_env="..."></key_hex -->
+            <!-- Example of multiple hex keys. They can be imported from env or be written down in the config -->
+            <!-- the code should use these keys and throw an exception if their length is not 16 bytes -->
+            <!-- key_hex id="0">...</key_hex -->
+            <!-- key_hex id="1" from_env=".."></key_hex -->
+            <!-- key_hex id="2">...</key_hex -->
+            <!-- current_key_id>2</current_key_id -->
+            <!-- Example of getting a hex key from the config -->
+            <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
+            <!-- key>...</key -->
+            <!-- example of adding a nonce -->
+            <!-- nonce>...</nonce -->
+        <!-- /aes_128_gcm_siv -->
+    </encryption_codecs>
+    <!-- Allows executing distributed DDL queries (CREATE, DROP, ALTER, RENAME) on a cluster.
+         Works only if ZooKeeper is enabled. Comment it out if such functionality isn't required. -->
+    <distributed_ddl>
+        <!-- Path in ZooKeeper to the queue with DDL queries -->
+        <path>/clickhouse/task_queue/ddl</path>
+        <!-- Settings from this profile will be used to execute DDL queries -->
+        <!-- <profile>default</profile> -->
+        <!-- Controls how many ON CLUSTER queries can run simultaneously. -->
+        <!-- <pool_size>1</pool_size> -->
+        <!--
+             Cleanup settings (active tasks will not be removed)
+        -->
+        <!-- Controls task TTL (default 1 week) -->
+        <!-- <task_max_lifetime>604800</task_max_lifetime> -->
+        <!-- Controls how often cleanup should be performed (in seconds) -->
+        <!-- <cleanup_delay_period>60</cleanup_delay_period> -->
+        <!-- Controls how many tasks can be in the queue -->
+        <!-- <max_tasks_in_queue>1000</max_tasks_in_queue> -->
+    </distributed_ddl>
+    <!-- Settings to fine tune MergeTree tables. See documentation in the source code, in MergeTreeSettings.h -->
+    <!--
+    <merge_tree>
+        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
+    </merge_tree>
+    -->
+    <!-- Protection from accidental DROP.
+         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
+         If you want to delete one table without changing the clickhouse-server config, you can create the special file <clickhouse-path>/flags/force_drop_table and run DROP once.
+         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
+         The same applies to max_partition_size_to_drop.
+         Uncomment to disable the protection.
+    -->
+    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
+    <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
+    <!-- Example of parameters for the GraphiteMergeTree table engine -->
+    <graphite_rollup_example>
+        <pattern>
+            <regexp>click_cost</regexp>
+            <function>any</function>
+            <retention>
+                <age>0</age>
+                <precision>3600</precision>
+            </retention>
+            <retention>
+                <age>86400</age>
+                <precision>60</precision>
+            </retention>
+        </pattern>
+        <default>
+            <function>max</function>
+            <retention>
+                <age>0</age>
+                <precision>60</precision>
+            </retention>
+            <retention>
+                <age>3600</age>
+                <precision>300</precision>
+            </retention>
+            <retention>
+                <age>86400</age>
+                <precision>3600</precision>
+            </retention>
+        </default>
+    </graphite_rollup_example>
+    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
+         The directory will be created if it doesn't exist.
+    -->
+    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
+    <!-- Default query masking rules; matching lines will be replaced with something else in the logs
+        (both text logs and system.query_log).
+        name - name for the rule (optional)
+        regexp - RE2 compatible regular expression (mandatory)
+        replace - substitution string for sensitive data (optional, by default - six asterisks)
+    <query_masking_rules>
+        <rule>
+            <name>hide encrypt/decrypt arguments</name>
+            <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
+            <replace>\1(???)</replace>
+        </rule>
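+
+        Editor's note (a minimal worked example): with the rule above, a logged
+        query such as
+            SELECT aes_encrypt_mysql('aes-256-cbc', 'secret', 'key')
+        would appear in the logs as
+            SELECT aes_encrypt_mysql(???)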
+    </query_masking_rules> -->
+    <!-- Uncomment to use custom HTTP handlers.
+        Rules are checked from top to bottom; the first match runs the handler.
+            url - to match the request URL, you can use the 'regex:' prefix for a regex match (optional)
+            methods - to match the request method, you can use commas to separate multiple method matches (optional)
+            headers - to match request headers, match each child element (the child element name is the header name), you can use the 'regex:' prefix for a regex match (optional)
+        handler is the request handler
+            type - supported types: static, dynamic_query_handler, predefined_query_handler
+            query - use with the predefined_query_handler type; executes the query when the handler is called
+            query_param_name - use with the dynamic_query_handler type; extracts and executes the value corresponding to the <query_param_name> value in the HTTP request params
+            status - use with the static type; response status code
+            content_type - use with the static type; response content-type
+            response_content - use with the static type; response content sent to the client. When using the 'file://' or 'config://' prefix, the content is read from the file or the configuration and sent to the client.
+
+    <http_handlers>
+        <rule>
+            <url>/</url>
+            <methods>POST,GET</methods>
+            <headers><pragma>no-cache</pragma></headers>
+            <handler>
+                <type>dynamic_query_handler</type>
+                <query_param_name>query</query_param_name>
+            </handler>
+        </rule>
+
+        <rule>
+            <url>/predefined_query</url>
+            <methods>POST,GET</methods>
+            <handler>
+                <type>predefined_query_handler</type>
+                <query>SELECT * FROM system.settings</query>
+            </handler>
+        </rule>
+
+        <rule>
+            <handler>
+                <type>static</type>
+                <status>200</status>
+                <content_type>text/plain; charset=UTF-8</content_type>
+                <response_content>config://http_server_default_response</response_content>
+            </handler>
+        </rule>
+    </http_handlers>
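+
+    Editor's note (sketch): with these handlers enabled, the first two rules can
+    be exercised with curl (host and port are assumptions):
+        curl -s 'http://localhost:8123/?query=SELECT%201' -H 'Pragma: no-cache'
+        curl -s http://localhost:8123/predefined_query | head -n 3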
+    -->
+    <send_crash_reports>
+        <!-- Changing <enabled> to true allows sending crash reports to -->
+        <!-- the ClickHouse core developers team via Sentry https://sentry.io -->
+        <!-- Doing so at least in pre-production environments is highly appreciated -->
+        <enabled>false</enabled>
+        <!-- Change <anonymize> to true if you don't feel comfortable attaching the server hostname to the crash report -->
+        <anonymize>false</anonymize>
+        <!-- The default endpoint should be changed to a different Sentry DSN only if you have -->
+        <!-- some in-house engineers or hired consultants who're going to debug ClickHouse issues for you -->
+        <endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint>
+    </send_crash_reports>
+    <!-- Uncomment to disable ClickHouse internal DNS caching. -->
+    <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
+    <!-- You can also configure rocksdb like this: -->
+    <!--
+    <rocksdb>
+        <options>
+            <max_background_jobs>8</max_background_jobs>
+        </options>
+        <column_family_options>
+            <num_levels>2</num_levels>
+        </column_family_options>
+        <tables>
+            <table>
+                <name>TABLE</name>
+                <options>
+                    <max_background_jobs>8</max_background_jobs>
+                </options>
+                <column_family_options>
+                    <num_levels>2</num_levels>
+                </column_family_options>
+            </table>
+        </tables>
+    </rocksdb>
+    -->
+    <!-- Configuration for the query cache -->
+    <!-- <query_cache> -->
+    <!--     <max_size>1073741824</max_size> -->
+    <!--     <max_entries>1024</max_entries> -->
+    <!--     <max_entry_size>1048576</max_entry_size> -->
+    <!--     <max_entry_rows>30000000</max_entry_rows> -->
+    <!-- </query_cache> -->
+    <!-- Uncomment to enable the merge tree metadata cache -->
+    <!--merge_tree_metadata_cache>
+        <lru_cache_size>268435456</lru_cache_size>
+        <continue_if_corrupted>true</continue_if_corrupted>
+    </merge_tree_metadata_cache-->
+    <!-- This allows disabling exposing addresses in stack traces for security reasons.
+         Please be aware that it does not improve security much, but makes debugging much harder.
+         The addresses that are small offsets from zero will be displayed nevertheless to show nullptr dereferences.
+         Regardless of this configuration, the addresses are visible in the system.stack_trace and system.trace_log tables
+         if the user has access to these tables.
+         Changing this setting is not recommended.
+    <show_addresses_in_stack_traces>false</show_addresses_in_stack_traces>
+    -->
+    <!-- On Linux systems this can control the behavior of the OOM killer.
+    <oom_score>-1000</oom_score>
+    -->
+    <http_port from_env="CLICKHOUSE_HTTP_PORT"/>
+    <tcp_port from_env="CLICKHOUSE_TCP_PORT"/>
+    <mysql_port from_env="CLICKHOUSE_MYSQL_PORT"/>
+    <postgresql_port from_env="CLICKHOUSE_POSTGRESQL_PORT"/>
+    <interserver_http_port from_env="CLICKHOUSE_INTERSERVER_HTTP_PORT"/>
+</clickhouse>

+ 102 - 0
clickhouse/clickhouse_etc/users.xml

@@ -0,0 +1,102 @@
+<?xml version="1.0"?>
+<clickhouse>
+  <!-- See also the files in users.d directory where the settings can be overridden. -->
+  <!-- Profiles of settings. -->
+  <profiles>
+    <!-- Default settings. -->
+    <default>
+        </default>
+    <!-- Profile that allows only read queries. -->
+    <readonly>
+      <readonly>1</readonly>
+    </readonly>
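+    <!-- Editor's note (a minimal sketch): a user assigned the 'readonly' profile
+         can run SELECTs but not writes or settings changes. For example
+         (user and table are assumptions):
+             clickhouse-client -u some_readonly_user -q "INSERT INTO t VALUES (1)"
+         fails with a "Cannot execute query in readonly mode" error
+         (exact wording may vary by version). -->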
+  </profiles>
+  <!-- Users and ACL. -->
+  <users>
+    <!-- If user name was not specified, 'default' user is used. -->
+    <default>
+      <!-- See also the files in users.d directory where the password can be overridden.
+
+                 Password could be specified in plaintext or in SHA256 (in hex format).
+
+                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
+                 Example: <password>qwerty</password>.
+                 Password could be empty.
+
+                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
+                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+                 Restriction of SHA256: it is impossible to connect to ClickHouse using the MySQL JS client (as of July 2019).
+
+                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
+
+                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+                  place its name in 'server' element inside 'ldap' element.
+                 Example: <ldap><server>my_ldap_server</server></ldap>
+
+                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+                  place 'kerberos' element instead of 'password' (and similar) elements.
+                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+                  whose initiator's realm matches it.
+                 Example: <kerberos />
+                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
+
+                 How to generate a decent password:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 The first line will be the password and the second the corresponding SHA256 hash.
+
+                 How to generate a double SHA1:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+                 The first line will be the password and the second the corresponding double SHA1 hash.
+            -->
+      <password from_env="CLICKHOUSE_ADMIN_PASSWORD"/>
+      <!-- List of networks with open access.
+
+                 To open access from everywhere, specify:
+                    <ip>::/0</ip>
+
+                 To open access only from localhost, specify:
+                    <ip>::1</ip>
+                    <ip>127.0.0.1</ip>
+
+                 Each element of the list has one of the following forms:
+                 <ip> IP address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+                 <host> Hostname. Example: server01.clickhouse.com.
+                     To check access, a DNS query is performed, and all received addresses are compared to the peer address.
+                 <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.clickhouse\.com$
+                     To check access, a DNS PTR query is performed for the peer address and then the regexp is applied.
+                     Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
+                     It is strongly recommended that the regexp ends with $.
+                 All results of DNS requests are cached until server restart.
+            -->
+      <networks>
+        <ip>::/0</ip>
+      </networks>
+      <!-- Settings profile for user. -->
+      <profile>default</profile>
+      <!-- Quota for user. -->
+      <quota>default</quota>
+      <!-- User can create other users and grant rights to them. -->
+      <!-- <access_management>1</access_management> -->
+    </default>
+  </users>
+  <!-- Quotas. -->
+  <quotas>
+    <!-- Name of quota. -->
+    <default>
+      <!-- Limits for time interval. You could specify many intervals with different limits. -->
+      <interval>
+        <!-- Length of interval. -->
+        <duration>3600</duration>
+        <!-- No limits. Just calculate resource usage for time interval. -->
+        <queries>0</queries>
+        <errors>0</errors>
+        <result_rows>0</result_rows>
+        <read_rows>0</read_rows>
+        <execution_time>0</execution_time>
+      </interval>
+    </default>
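+    <!-- Editor's note (a hypothetical sketch, not part of the original file):
+         a quota that actually enforces limits would set non-zero values, e.g.
+         <limited>
+           <interval>
+             <duration>3600</duration>
+             <queries>100</queries>
+             <errors>10</errors>
+           </interval>
+         </limited>
+         and would be referenced from a user via <quota>limited</quota>. -->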
+  </quotas>
+</clickhouse>

+ 18 - 0
clickhouse/templates/configmap-users.yaml

@@ -0,0 +1,18 @@
+{{- if not .Values.existingOverridesConfigmap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "common.names.fullname" . }}-users
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: clickhouse
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  users.xml: |
+    {{- include "common.tplvalues.render" (dict "value" .Values.defaultConfigurationOverridesUsers "context" $) | nindent 4 }}
+{{- end }}
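+# Editor's note (a minimal sketch): the rendered users.xml can be inspected
+# before install; chart path and release name below are assumptions:
+#   helm template my-release ./clickhouse --show-only templates/configmap-users.yaml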

+ 9 - 0
clickhouse/templates/statefulset.yaml

@@ -27,6 +27,7 @@ spec:
     metadata:
       annotations:
         checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") $ | sha256sum }}
+        checksum/config-users: {{ include (print $.Template.BasePath "/configmap-users.yaml") $ | sha256sum }}
         checksum/config-extra: {{ include (print $.Template.BasePath "/configmap-extra.yaml") $ | sha256sum }}
         {{- if $.Values.podAnnotations }}
         {{- include "common.tplvalues.render" (dict "value" $.Values.podAnnotations "context" $) | nindent 8 }}
@@ -308,6 +309,8 @@ spec:
               mountPath: /bitnami/clickhouse
             - name: config
               mountPath: /bitnami/clickhouse/etc/conf.d/default
+            - name: config-users
+              mountPath: /bitnami/clickhouse/etc/users.xml
+              subPath: users.xml
           {{- if or $.Values.extraOverridesConfigmap $.Values.extraOverrides }}
             - name: extra-config
               mountPath: /bitnami/clickhouse/etc/conf.d/extra-configmap
@@ -342,6 +345,12 @@ spec:
         - name: config
           configMap:
             name: {{ template "clickhouse.configmapName" $ }}
+        - name: config-users
+          configMap:
+            name: {{ template "common.names.fullname" . }}-users
+            items:
+            - key: users.xml
+              path: users.xml
         {{- if or $.Values.initdbScriptsSecret $.Values.initdbScripts }}
         - name: custom-init-scripts
           secret:

+ 113 - 0
clickhouse/values.yaml

@@ -239,6 +239,119 @@ keeper:
 
 ## @param defaultConfigurationOverrides [string] Default configuration overrides (evaluated as a template)
 ##
+## @param defaultConfigurationOverridesUsers [string] Default users.xml configuration overrides (evaluated as a template)
+##
+defaultConfigurationOverridesUsers: |
+  <?xml version="1.0"?>
+    <clickhouse>
+      <!-- See also the files in users.d directory where the settings can be overridden. -->
+      <!-- Profiles of settings. -->
+      <profiles>
+        <!-- Default settings. -->
+        <default>
+            </default>
+        <!-- Profile that allows only read queries. -->
+        <readonly>
+          <readonly>1</readonly>
+        </readonly>
+      </profiles>
+      <!-- Users and ACL. -->
+      <users>
+        <!-- If user name was not specified, 'default' user is used. -->
+        <default>
+          <!-- See also the files in users.d directory where the password can be overridden.
+    
+                     Password could be specified in plaintext or in SHA256 (in hex format).
+    
+                     If you want to specify password in plaintext (not recommended), place it in 'password' element.
+                     Example: <password>qwerty</password>.
+                     Password could be empty.
+    
+                     If you want to specify SHA256, place it in 'password_sha256_hex' element.
+                     Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+                     Restriction of SHA256: it is impossible to connect to ClickHouse using the MySQL JS client (as of July 2019).
+    
+                     If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+                     Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
+    
+                     If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+                      place its name in 'server' element inside 'ldap' element.
+                     Example: <ldap><server>my_ldap_server</server></ldap>
+    
+                     If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+                      place 'kerberos' element instead of 'password' (and similar) elements.
+                     The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+                     You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+                      whose initiator's realm matches it.
+                     Example: <kerberos />
+                     Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
+    
+                     How to generate a decent password:
+                     Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                     The first line will be the password and the second the corresponding SHA256 hash.
+
+                     How to generate a double SHA1:
+                     Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+                     The first line will be the password and the second the corresponding double SHA1 hash.
+                -->
+          <password from_env="CLICKHOUSE_ADMIN_PASSWORD"/>
+          <!-- List of networks with open access.
+    
+                     To open access from everywhere, specify:
+                        <ip>::/0</ip>
+    
+                     To open access only from localhost, specify:
+                        <ip>::1</ip>
+                        <ip>127.0.0.1</ip>
+    
+                     Each element of the list has one of the following forms:
+                     <ip> IP address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+                         2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+                     <host> Hostname. Example: server01.clickhouse.com.
+                         To check access, a DNS query is performed, and all received addresses are compared to the peer address.
+                     <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.clickhouse\.com$
+                         To check access, a DNS PTR query is performed for the peer address and then the regexp is applied.
+                         Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
+                         It is strongly recommended that the regexp ends with $.
+                     All results of DNS requests are cached until server restart.
+                -->
+          <networks>
+            <ip>::/0</ip>
+          </networks>
+          <!-- Settings profile for user. -->
+          <profile>default</profile>
+          <!-- Quota for user. -->
+          <quota>default</quota>
+          <!-- User can create other users and grant rights to them. -->
+          <access_management>1</access_management>
+        </default>
+        <viewer>
+          <password>ngh5T@12356789</password>
+          <networks>
+            <ip>::/0</ip>
+          </networks>
+          <!-- Settings profile for user. -->
+          <profile>readonly</profile>
+          <quota>default</quota>
+        </viewer>
+      </users>
+      <!-- Quotas. -->
+      <quotas>
+        <!-- Name of quota. -->
+        <default>
+          <!-- Limits for time interval. You could specify many intervals with different limits. -->
+          <interval>
+            <!-- Length of interval. -->
+            <duration>3600</duration>
+            <!-- No limits. Just calculate resource usage for time interval. -->
+            <queries>0</queries>
+            <errors>0</errors>
+            <result_rows>0</result_rows>
+            <read_rows>0</read_rows>
+            <execution_time>0</execution_time>
+          </interval>
+        </default>
+      </quotas>
+    </clickhouse> 
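+## Editor's note (sketch): the 'viewer' password above ships in plain text in
+## this file; it can be replaced at deploy time without editing the chart,
+## e.g. (file path is an assumption):
+##   helm upgrade --install my-release ./clickhouse --set-file defaultConfigurationOverridesUsers=./my-users.xml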
+
 defaultConfigurationOverrides: |
   <clickhouse>
     <!-- Macros -->

+ 20 - 0
clickhouse_etc/conf.d/00_default_overrides.xml

@@ -0,0 +1,20 @@
+<clickhouse>
+  <!-- Macros -->
+  <macros>
+    <shard from_env="CLICKHOUSE_SHARD_ID"></shard>
+    <replica from_env="CLICKHOUSE_REPLICA_ID"></replica>
+    <layer>clickhouse</layer>
+  </macros>
+  <!-- Log Level -->
+  <logger>
+    <level>information</level>
+  </logger>
+  <!-- Zookeeper configuration -->
+  <zookeeper>
+    <node>
+      <host from_env="KEEPER_NODE_0"></host>
+      <port>2181</port>
+    </node>
+  </zookeeper>
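+  <!-- Editor's note (a minimal sketch): the from_env attributes above are
+       resolved by the server at startup; the pod is expected to provide, e.g.
+       (values are assumptions):
+         CLICKHOUSE_SHARD_ID=0
+         CLICKHOUSE_REPLICA_ID=clickhouse-shard0-0
+         KEEPER_NODE_0=clickhouse-keeper-headless
+  -->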
+</clickhouse>

+ 787 - 0
clickhouse_etc/config.xml

@@ -0,0 +1,787 @@
+<?xml version="1.0"?>
+<!--
+  NOTE: User- and query-level settings are set up in the "users.xml" file.
+  If you have accidentally specified user-level settings here, the server won't start.
+  You can either move the settings to the right place inside the "users.xml" file
+   or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
+-->
+<clickhouse>
+    <logger>
+        <!-- Possible levels [1]:
+
+          - none (turns off logging)
+          - fatal
+          - critical
+          - error
+          - warning
+          - notice
+          - information
+          - debug
+          - trace
+          - test (not for production usage)
+
+            [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
+        -->
+        <level>trace</level>
+        <!-- Rotation policy
+             See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
+        -->
+        <size>1000M</size>
+        <count>10</count>
+        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is a tty) -->
+        <!-- Per level overrides (legacy):
+
+        For example to suppress logging of the ConfigReloader you can use:
+        NOTE: levels.logger is reserved, see below.
+        -->
+        <!--
+        <levels>
+          <ConfigReloader>none</ConfigReloader>
+        </levels>
+        -->
+        <!-- Per level overrides:
+
+        For example to suppress logging of the RBAC for default user you can use:
+        (But please note that the logger name maybe changed from version to version, even after minor upgrade)
+        -->
+        <!--
+        <levels>
+          <logger>
+            <name>ContextAccess (default)</name>
+            <level>none</level>
+          </logger>
+          <logger>
+            <name>DatabaseOrdinary (test)</name>
+            <level>none</level>
+          </logger>
+        </levels>
+        -->
+        <!-- Structured log formatting:
+        You can specify a log format (for now, JSON only). In that case, the console log will be printed
+        in the specified format.
+        For example, as below:
+        {"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
+        To enable JSON logging support, please uncomment the entire <formatting> tag below.
+
+        a) You can modify key names by changing values under tag values inside <names> tag.
+        For example, to change DATE_TIME to MY_DATE_TIME, you can do like:
+            <date_time>MY_DATE_TIME</date_time>
+        b) You can stop unwanted log properties from appearing in the logs. To do so, simply comment out (recommended)
+        that property in this file.
+        For example, if you do not want your log to print query_id, you can comment out only the <query_id> tag.
+        However, if you comment out all the tags under <names>, the program will print the default values shown
+        below.
+        -->
+        <!-- <formatting>
+            <type>json</type>
+            <names>
+                <date_time>date_time</date_time>
+                <thread_name>thread_name</thread_name>
+                <thread_id>thread_id</thread_id>
+                <level>level</level>
+                <query_id>query_id</query_id>
+                <logger_name>logger_name</logger_name>
+                <message>message</message>
+                <source_file>source_file</source_file>
+                <source_line>source_line</source_line>
+            </names>
+        </formatting> -->
+        <console>1</console>
+    </logger>
+    <!-- Add headers to the response for OPTIONS requests. The OPTIONS method is used in CORS preflight requests. -->
+    <!-- It is off by default. The following headers are required for CORS. -->
+    <!-- http_options_response>
+        <header>
+            <name>Access-Control-Allow-Origin</name>
+            <value>*</value>
+        </header>
+        <header>
+            <name>Access-Control-Allow-Headers</name>
+            <value>origin, x-requested-with</value>
+        </header>
+        <header>
+            <name>Access-Control-Allow-Methods</name>
+            <value>POST, GET, OPTIONS</value>
+        </header>
+        <header>
+            <name>Access-Control-Max-Age</name>
+            <value>86400</value>
+        </header>
+    </http_options_response -->
+    <!-- It is the name that will be shown in clickhouse-client.
+         By default, anything with "production" in it will be highlighted in red in the query prompt.
+    -->
+    <!--display_name>production</display_name-->
+    <!-- Port for the HTTP API. See also 'https_port' for secure connections.
+         This interface is also used by ODBC and JDBC drivers (DataGrip, Dbeaver, ...)
+         and by most of web interfaces (embedded UI, Grafana, Redash, ...).
+      -->
+    <!-- Port for interaction by the native protocol with:
+         - clickhouse-client and other native ClickHouse tools (clickhouse-benchmark, clickhouse-copier);
+         - clickhouse-server with other clickhouse-servers for distributed query processing;
+         - ClickHouse drivers and applications supporting native protocol
+         (this protocol is also informally called as "the TCP protocol");
+         See also 'tcp_port_secure' for secure connections.
+    -->
+    <!-- Compatibility with the MySQL protocol.
+         ClickHouse will pretend to be MySQL for applications connecting to this port.
+    -->
+    <!-- Compatibility with the PostgreSQL protocol.
+         ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
+    -->
+    <!-- HTTP API with TLS (HTTPS).
+         You have to configure certificate to enable this interface.
+         See the openSSL section below.
+    -->
+    <!-- <https_port>8443</https_port> -->
+    <!-- Native interface with TLS.
+         You have to configure certificate to enable this interface.
+         See the openSSL section below.
+    -->
+    <!-- <tcp_port_secure>9440</tcp_port_secure> -->
+    <!-- Native interface wrapped with the PROXYv1 protocol
+         PROXYv1 header sent for every connection.
+         ClickHouse will extract information about proxy-forwarded client address from the header.
+    -->
+    <!-- <tcp_with_proxy_port>9011</tcp_with_proxy_port> -->
+    <!-- Port for communication between replicas. Used for data exchange.
+         It provides low-level data access between servers.
+         This port should not be accessible from untrusted networks.
+         See also 'interserver_http_credentials'.
+         Data transferred over connections to this port should not go through untrusted networks.
+         See also 'interserver_https_port'.
+      -->
+    <!-- Port for communication between replicas with TLS.
+         You have to configure certificate to enable this interface.
+         See the openSSL section below.
+         See also 'interserver_http_credentials'.
+      -->
+    <!-- <interserver_https_port>9010</interserver_https_port> -->
+    <!-- Hostname that is used by other replicas to request this server.
+         If not specified, then it is determined analogous to 'hostname -f' command.
+         This setting could be used to switch replication to another network interface
+         (the server may be connected to multiple networks via multiple addresses)
+      -->
+    <!--
+    <interserver_http_host>example.clickhouse.com</interserver_http_host>
+    -->
+    <!-- You can specify credentials for authentication between replicas.
+         This is required when interserver_https_port is accessible from untrusted networks,
+         and also recommended to avoid SSRF attacks from possibly compromised services in your network.
+      -->
+    <!--<interserver_http_credentials>
+        <user>interserver</user>
+        <password></password>
+    </interserver_http_credentials>-->
+    <!-- Listen on the specified address.
+         Use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere.
+         Notes:
+         If you open connections from wildcard address, make sure that at least one of the following measures applied:
+         - server is protected by firewall and not accessible from untrusted networks;
+         - all users are restricted to subset of network addresses (see users.xml);
+         - all users have strong passwords, only secure (TLS) interfaces are accessible, or connections are only made via TLS interfaces.
+         - users without password have readonly access.
+         See also: https://www.shodan.io/search?query=clickhouse
+      -->
+    <!-- <listen_host>::</listen_host> -->
+    <!-- Same for hosts without support for IPv6: -->
+    <!-- <listen_host>0.0.0.0</listen_host> -->
+    <!-- Default values - try to listen on localhost on IPv4 and IPv6. -->
+    <!--
+    <listen_host>::1</listen_host>
+    <listen_host>127.0.0.1</listen_host>
+    -->
+    <!-- <interserver_listen_host>::</interserver_listen_host> -->
+    <!-- Listen host for communication between replicas. Used for data exchange -->
+    <!-- Default values - equal to listen_host -->
+    <!-- Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen. -->
+    <!-- <listen_try>0</listen_try> -->
+    <!-- Allow multiple servers to listen on the same address:port. This is not recommended.
+      -->
+    <!-- <listen_reuse_port>0</listen_reuse_port> -->
+    <!-- <listen_backlog>4096</listen_backlog> -->
+    <max_connections>4096</max_connections>
+    <!-- For 'Connection: keep-alive' in HTTP 1.1 -->
+    <keep_alive_timeout>3</keep_alive_timeout>
+    <!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
+    <!-- <grpc_port>9100</grpc_port> -->
+    <grpc>
+        <enable_ssl>false</enable_ssl>
+        <!-- The following two files are used only if enable_ssl=1 -->
+        <ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
+        <ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
+        <!-- Whether the server will request the client for a certificate -->
+        <ssl_require_client_auth>false</ssl_require_client_auth>
+        <!-- The following file is used only if ssl_require_client_auth=1 -->
+        <ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
+        <!-- Default transport compression type (can be overridden by client, see the transport_compression_type field in QueryInfo).
+             Supported algorithms: none, deflate, gzip, stream_gzip -->
+        <transport_compression_type>none</transport_compression_type>
+        <!-- Default transport compression level. Supported levels: 0..3 -->
+        <transport_compression_level>0</transport_compression_level>
+        <!-- Send/receive message size limits in bytes. -1 means unlimited -->
+        <max_send_message_size>-1</max_send_message_size>
+        <max_receive_message_size>-1</max_receive_message_size>
+        <!-- Enable if you want very detailed logs -->
+        <verbose_logs>false</verbose_logs>
+    </grpc>
+    <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
+    <openSSL>
+        <server>
+            <!-- Used for the https server AND the secure tcp port -->
+            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
+            <!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
+            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
+            <!-- dhparams are optional. You can delete the <dhParamsFile> element.
+                 To generate dhparams, use the following command:
+                  openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
+                 Only the file format with BEGIN DH PARAMETERS is supported.
+            -->
+            <!-- <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile> -->
+            <verificationMode>none</verificationMode>
+            <loadDefaultCAFile>true</loadDefaultCAFile>
+            <cacheSessions>true</cacheSessions>
+            <disableProtocols>sslv2,sslv3</disableProtocols>
+            <preferServerCiphers>true</preferServerCiphers>
+        </server>
+        <client>
+            <!-- Used for connecting to an https dictionary source and secured ZooKeeper communication -->
+            <loadDefaultCAFile>true</loadDefaultCAFile>
+            <cacheSessions>true</cacheSessions>
+            <disableProtocols>sslv2,sslv3</disableProtocols>
+            <preferServerCiphers>true</preferServerCiphers>
+            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
+            <invalidCertificateHandler>
+                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
+                <name>RejectCertificateHandler</name>
+            </invalidCertificateHandler>
+        </client>
+    </openSSL>
+    <!-- Default root page on the http[s] server. For example, load the UI from https://tabix.io/ when opening http://localhost:8123 -->
+    <!--
+    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+    -->
+    <!-- The maximum number of query processing threads, excluding threads for retrieving data from remote servers, allowed to run all queries.
+         This is not a hard limit. If the limit is reached, the query will still get at least one thread to run.
+         A query can upscale to the desired number of threads during execution if more threads become available.
+    -->
+    <concurrent_threads_soft_limit_num>0</concurrent_threads_soft_limit_num>
+    <concurrent_threads_soft_limit_ratio_to_cores>0</concurrent_threads_soft_limit_ratio_to_cores>
+    <!-- Maximum number of concurrent queries. -->
+    <max_concurrent_queries>100</max_concurrent_queries>
+    <!-- Maximum memory usage (resident set size) for the server process.
+         Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
+         If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.
+
+         The constraint is checked on query execution time.
+         If a query tries to allocate memory and the current memory usage plus allocation is greater
+          than specified threshold, exception will be thrown.
+
+         It is not practical to set this constraint to small values like just a few gigabytes,
+          because memory allocator will keep this amount of memory in caches and the server will deny service of queries.
+      -->
+    <max_server_memory_usage>0</max_server_memory_usage>
+    <!-- Maximum number of threads in the Global thread pool.
+    This will default to a maximum of 10000 threads if not specified.
+    This setting will be useful in scenarios where there are a large number
+    of distributed queries that are running concurrently but are idling most
+    of the time, in which case a higher number of threads might be required.
+    -->
+    <max_thread_pool_size>10000</max_thread_pool_size>
+    <!-- Configure other thread pools: -->
+    <!--
+    <background_buffer_flush_schedule_pool_size>16</background_buffer_flush_schedule_pool_size>
+    <background_pool_size>16</background_pool_size>
+    <background_merges_mutations_concurrency_ratio>2</background_merges_mutations_concurrency_ratio>
+    <background_merges_mutations_scheduling_policy>round_robin</background_merges_mutations_scheduling_policy>
+    <background_move_pool_size>8</background_move_pool_size>
+    <background_fetches_pool_size>8</background_fetches_pool_size>
+    <background_common_pool_size>8</background_common_pool_size>
+    <background_schedule_pool_size>128</background_schedule_pool_size>
+    <background_message_broker_schedule_pool_size>16</background_message_broker_schedule_pool_size>
+    <background_distributed_schedule_pool_size>16</background_distributed_schedule_pool_size>
+    -->
+    <!-- On memory-constrained environments you may have to set this to a value larger than 1.
+    -->
+    <max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
+    <!-- Simple server-wide memory profiler. Collects a stack trace at every peak allocation step (in bytes).
+         Data will be stored in system.trace_log table with query_id = empty string.
+         Zero means disabled.
+      -->
+    <total_memory_profiler_step>4194304</total_memory_profiler_step>
+    <!-- Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type.
+         The probability is for every alloc/free regardless to the size of the allocation.
+         Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
+          which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
+         You may want to set 'total_memory_profiler_step' to 1 for extra fine grained sampling.
+      -->
+    <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>
+    <!-- Set a limit on the number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
+         the correct maximum value. -->
+    <!-- <max_open_files>262144</max_open_files> -->
+    <!-- Size of the cache of uncompressed blocks of data, used in tables of the MergeTree family.
+         In bytes. Cache is single for server. Memory is allocated only on demand.
+         Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
+         Uncompressed cache is advantageous only for very short queries and in rare cases.
+
+         Note: uncompressed cache can be pointless for lz4, because memory bandwidth
+         is slower than multi-core decompression on some server configurations.
+         Enabling it can sometimes paradoxically make queries slower.
+      -->
+    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
+    <!-- Approximate size of the mark cache, used in tables of the MergeTree family.
+         In bytes. Cache is single for server. Memory is allocated only on demand.
+         You should not lower this value.
+      -->
+    <mark_cache_size>5368709120</mark_cache_size>
+    <!-- If you enable the `min_bytes_to_use_mmap_io` setting,
+         the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace.
+         It makes sense only for large files and helps only if data reside in page cache.
+         To avoid frequent open/mmap/munmap/close calls (which are very expensive due to consequent page faults)
+         and to reuse mappings from several threads and queries,
+         the cache of mapped files is maintained. Its size is the number of mapped regions (usually equal to the number of mapped files).
+         The amount of data in mapped files can be monitored
+         in system.metrics, system.metric_log by the MMappedFiles, MMappedFileBytes metrics
+         and in system.asynchronous_metrics, system.asynchronous_metrics_log by the MMapCacheCells metric,
+         and also in system.events, system.processes, system.query_log, system.query_thread_log, system.query_views_log by the
+         CreatedReadBufferMMap, CreatedReadBufferMMapFailed, MMappedFileCacheHits, MMappedFileCacheMisses events.
+         Note that the amount of data in mapped files does not consume memory directly and is not accounted
+         in query or server memory usage - because this memory can be discarded similar to OS page cache.
+         The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree,
+         also it can be dropped manually by the SYSTEM DROP MMAP CACHE query.
+      -->
+    <mmap_cache_size>1000</mmap_cache_size>
+    <!-- Cache size in bytes for compiled expressions. -->
+    <compiled_expression_cache_size>134217728</compiled_expression_cache_size>
+    <!-- Cache size in elements for compiled expressions. -->
+    <compiled_expression_cache_elements_size>10000</compiled_expression_cache_elements_size>
+    <!-- Path to the data directory, with trailing slash. -->
+    <path>/bitnami/clickhouse/data</path>
+    <!-- Multi-disk configuration example: -->
+    <!--
+    <storage_configuration>
+        <disks>
+            <default>
+                <keep_free_space_bytes>0</keep_free_space_bytes>
+            </default>
+            <data>
+                <path>/data/</path>
+                <keep_free_space_bytes>0</keep_free_space_bytes>
+            </data>
+            <s3>
+                <type>s3</type>
+                <endpoint>http://path/to/endpoint</endpoint>
+                <access_key_id>your_access_key_id</access_key_id>
+                <secret_access_key>your_secret_access_key</secret_access_key>
+            </s3>
+            <blob_storage_disk>
+                <type>azure_blob_storage</type>
+                <storage_account_url>http://account.blob.core.windows.net</storage_account_url>
+                <container_name>container</container_name>
+                <account_name>account</account_name>
+                <account_key>pass123</account_key>
+                <metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
+                <cache_enabled>true</cache_enabled>
+                <cache_path>/var/lib/clickhouse/disks/blob_storage_disk/cache/</cache_path>
+                <skip_access_check>false</skip_access_check>
+            </blob_storage_disk>
+        </disks>
+
+        <policies>
+            <all>
+                <volumes>
+                    <main>
+                        <disk>default</disk>
+                        <disk>data</disk>
+                        <disk>s3</disk>
+                        <disk>blob_storage_disk</disk>
+
+                        <max_data_part_size_bytes></max_data_part_size_bytes>
+                        <max_data_part_size_ratio></max_data_part_size_ratio>
+                        <perform_ttl_move_on_insert>true</perform_ttl_move_on_insert>
+                        <prefer_not_to_merge>false</prefer_not_to_merge>
+                        <load_balancing>round_robin</load_balancing>
+                    </main>
+                </volumes>
+                <move_factor>0.2</move_factor>
+            </all>
+        </policies>
+    </storage_configuration>
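+
+    Editor's note (a worked sketch): once such a policy exists, a table can opt
+    into it at creation time, e.g.
+        CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY x
+        SETTINGS storage_policy = 'all'
+    ('all' refers to the policy name defined above).
+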
+    -->
+    <!-- Path to temporary data for processing hard queries. -->
+    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
+    <!-- Set to 0 to disable the plaintext_password and no_password auth types for ACL. -->
+    <allow_plaintext_password>1</allow_plaintext_password>
+    <allow_no_password>1</allow_no_password>
+    <allow_implicit_no_password>1</allow_implicit_no_password>
+    <!-- Complexity requirements for user passwords. -->
+    <!-- <password_complexity>
+        <rule>
+            <pattern>.{12}</pattern>
+            <message>be at least 12 characters long</message>
+        </rule>
+        <rule>
+            <pattern>\p{N}</pattern>
+            <message>contain at least 1 numeric character</message>
+        </rule>
+        <rule>
+            <pattern>\p{Ll}</pattern>
+            <message>contain at least 1 lowercase character</message>
+        </rule>
+        <rule>
+            <pattern>\p{Lu}</pattern>
+            <message>contain at least 1 uppercase character</message>
+        </rule>
+        <rule>
+            <pattern>[^\p{L}\p{N}]</pattern>
+            <message>contain at least 1 special character</message>
+        </rule>
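+
+        Editor's note (sketch): with these rules enabled, creating a user with a
+        weak password is rejected at DDL time, e.g.
+            CREATE USER u IDENTIFIED BY 'short'
+        fails and the error lists the unmet rules, such as "be at least 12
+        characters long".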
+    </password_complexity> -->
+    <!-- Policy from the <storage_configuration> for the temporary files.
+         If not set <tmp_path> is used, otherwise <tmp_path> is ignored.
+
+         Notes:
+         - move_factor              is ignored
+         - keep_free_space_bytes    is ignored
+         - max_data_part_size_bytes is ignored
+         - you must have exactly one volume in that policy
+    -->
+    <!-- <tmp_policy>tmp</tmp_policy> -->
+    <!-- Directory with user-provided files that are accessible by the 'file' table function. -->
+    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
+    <!-- LDAP server definitions. -->
+    <ldap_servers>
+        <!-- List LDAP servers with their connection parameters here to later 1) use them as authenticators for dedicated local users,
+              who have 'ldap' authentication mechanism specified instead of 'password', or to 2) use them as remote user directories.
+             Parameters:
+                host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
+                port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
+                bind_dn - template used to construct the DN to bind to.
+                        The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual
+                         user name during each authentication attempt.
+                user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user.
+                        This is mainly used in search filters for further role mapping when the server is Active Directory. The
+                         resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default,
+                         user DN is set equal to bind DN, but once search is performed, it will be updated with to the actual detected
+                         user DN value.
+                    base_dn - template used to construct the base DN for the LDAP search.
+                            The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings
+                             of the template with the actual user name and bind DN during the LDAP search.
+                    scope - scope of the LDAP search.
+                            Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
+                    search_filter - template used to construct the search filter for the LDAP search.
+                            The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}'
+                             substrings of the template with the actual user name, bind DN, and base DN during the LDAP search.
+                            Note, that the special characters must be escaped properly in XML.
+                verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed
+                         to be successfully authenticated for all consecutive requests without contacting the LDAP server.
+                        Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request.
+                enable_tls - flag to trigger use of secure connection to the LDAP server.
+                        Specify 'no' for plain text (ldap://) protocol (not recommended).
+                        Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
+                        Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
+                tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
+                        Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
+                tls_require_cert - SSL/TLS peer certificate verification behavior.
+                        Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
+                tls_cert_file - path to certificate file.
+                tls_key_file - path to certificate key file.
+                tls_ca_cert_file - path to CA certificate file.
+                tls_ca_cert_dir - path to the directory containing CA certificates.
+                tls_cipher_suite - allowed cipher suite (in OpenSSL notation).
+             Example:
+                <my_ldap_server>
+                    <host>localhost</host>
+                    <port>636</port>
+                    <bind_dn>uid={user_name},ou=users,dc=example,dc=com</bind_dn>
+                    <verification_cooldown>300</verification_cooldown>
+                    <enable_tls>yes</enable_tls>
+                    <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
+                    <tls_require_cert>demand</tls_require_cert>
+                    <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
+                    <tls_key_file>/path/to/tls_key_file</tls_key_file>
+                    <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
+                    <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
+                    <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
+                </my_ldap_server>
+             Example (typical Active Directory with configured user DN detection for further role mapping):
+                <my_ad_server>
+                    <host>localhost</host>
+                    <port>389</port>
+                    <bind_dn>EXAMPLE\{user_name}</bind_dn>
+                    <user_dn_detection>
+                        <base_dn>CN=Users,DC=example,DC=com</base_dn>
+                        <search_filter>(&amp;(objectClass=user)(sAMAccountName={user_name}))</search_filter>
+                    </user_dn_detection>
+                    <enable_tls>no</enable_tls>
+                </my_ad_server>
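+
+             Once a server is defined here, it can be referenced either from a user entry in users.xml
+              (e.g. <ldap><server>my_ldap_server</server></ldap>) or from the 'ldap' remote user directory
+              in the 'user_directories' section below.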
+        -->
+    </ldap_servers>
+
+    <!-- To enable Kerberos authentication support for HTTP requests (GSS-SPNEGO), for those users who are explicitly configured
+          to authenticate via Kerberos, define a single 'kerberos' section here.
+         Parameters:
+            principal - canonical service principal name that will be acquired and used when accepting security contexts.
+                    This parameter is optional; if omitted, the default principal will be used.
+                    This parameter cannot be specified together with the 'realm' parameter.
+            realm - a realm that will be used to restrict authentication to only those requests whose initiator's realm matches it.
+                    This parameter is optional; if omitted, no additional filtering by realm will be applied.
+                    This parameter cannot be specified together with the 'principal' parameter.
+         Example:
+            <kerberos />
+         Example:
+            <kerberos>
+                <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
+            </kerberos>
+         Example:
+            <kerberos>
+                <realm>EXAMPLE.COM</realm>
+            </kerberos>
+    -->
+
+    <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
+    <user_directories>
+        <users_xml>
+            <!-- Path to configuration file with predefined users. -->
+            <path>users.xml</path>
+        </users_xml>
+        <local_directory>
+            <!-- Path to folder where users created by SQL commands are stored. -->
+            <path>/var/lib/clickhouse/access/</path>
+        </local_directory>
+        <!-- To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
+              with the following parameters:
+                server - one of LDAP server names defined in 'ldap_servers' config section above.
+                        This parameter is mandatory and cannot be empty.
+                roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
+                        If no roles are specified here or assigned during role mapping (below), user will not be able to perform any
+                         actions after authentication.
+                role_mapping - section with LDAP search parameters and mapping rules.
+                        When a user authenticates, while still bound to LDAP, an LDAP search is performed using search_filter and the
+                         name of the logged in user. For each entry found during that search, the value of the specified attribute is
+                         extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the
+                         value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by
+                         CREATE ROLE command.
+                        There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be
+                         applied.
+                    base_dn - template used to construct the base DN for the LDAP search.
+                            The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}'
+                             substrings of the template with the actual user name, bind DN, and user DN during each LDAP search.
+                    scope - scope of the LDAP search.
+                            Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
+                    search_filter - template used to construct the search filter for the LDAP search.
+                            The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and
+                             '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during
+                             each LDAP search.
+                            Note that special characters must be properly escaped in XML.
+                    attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default.
+                    prefix - prefix that is expected to be in front of each string in the original list of strings returned by
+                             the LDAP search. The prefix will be removed from the original strings, and the resulting strings will be treated
+                             as local role names. Empty, by default.
+             Example:
+                <ldap>
+                    <server>my_ldap_server</server>
+                    <roles>
+                        <my_local_role1 />
+                        <my_local_role2 />
+                    </roles>
+                    <role_mapping>
+                        <base_dn>ou=groups,dc=example,dc=com</base_dn>
+                        <scope>subtree</scope>
+                        <search_filter>(&amp;(objectClass=groupOfNames)(member={bind_dn}))</search_filter>
+                        <attribute>cn</attribute>
+                        <prefix>clickhouse_</prefix>
+                    </role_mapping>
+                </ldap>
+             Example (typical Active Directory with role mapping that relies on the detected user DN):
+                <ldap>
+                    <server>my_ad_server</server>
+                    <role_mapping>
+                        <base_dn>CN=Users,DC=example,DC=com</base_dn>
+                        <attribute>CN</attribute>
+                        <scope>subtree</scope>
+                        <search_filter>(&amp;(objectClass=group)(member={user_dn}))</search_filter>
+                        <prefix>clickhouse_</prefix>
+                    </role_mapping>
+                </ldap>
+        -->
+    </user_directories>
+
+    <access_control_improvements>
+        <!-- Enables logic that users without permissive row policies can still read rows using a SELECT query.
+             For example, if there are two users A and B, and a row policy is defined only for A, then
+             if this setting is true, user B will see all rows; if it is false, user B will see no rows.
+             By default this setting is false for compatibility with earlier access configurations. -->
+        <users_without_row_policies_can_read_rows>false</users_without_row_policies_can_read_rows>
+
+        <!-- By default, for backward compatibility ON CLUSTER queries ignore the CLUSTER grant;
+             you can change this behaviour by setting this to true. -->
+        <on_cluster_queries_require_cluster_grant>false</on_cluster_queries_require_cluster_grant>
+
+        <!-- By default, for backward compatibility "SELECT * FROM system.<table>" doesn't require any grants and can be executed
+             by any user. You can change this behaviour by setting this to true.
+             If it's set to true, this query requires "GRANT SELECT ON system.<table>", just as for non-system tables.
+             Exceptions: a few system tables ("tables", "columns", "databases", and some constant tables like "one", "contributors")
+             are still accessible to everyone; and if a SHOW privilege is granted (e.g. "SHOW USERS"), the corresponding system
+             table (i.e. "system.users") will be accessible. -->
+        <select_from_system_db_requires_grant>false</select_from_system_db_requires_grant>
+
+        <!-- By default, for backward compatibility "SELECT * FROM information_schema.<table>" doesn't require any grants and can be
+             executed by any user. You can change this behaviour by setting this to true.
+             If it's set to true, this query requires "GRANT SELECT ON information_schema.<table>", just as for ordinary tables. -->
+        <select_from_information_schema_requires_grant>false</select_from_information_schema_requires_grant>
+
+        <!-- By default, for backward compatibility, a settings profile constraint for a specific setting inherits every unset field from
+             the previous profile. You can change this behaviour by setting this to true.
+             If it's set to true and a settings profile has a constraint for a specific setting, then this constraint completely cancels all
+             actions of the previous constraint (defined in other profiles) for the same setting, including fields that are not set by the new constraint.
+             It also enables the 'changeable_in_readonly' constraint type. -->
+        <settings_constraints_replace_previous>false</settings_constraints_replace_previous>
+
+        <!-- Number of seconds since last access that a role is kept in the Role Cache. -->
+        <role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds>
+    </access_control_improvements>
+
+    <!-- Default profile of settings. -->
+    <default_profile>default</default_profile>
+
+    <!-- Comma-separated list of prefixes for user-defined settings. -->
+    <custom_settings_prefixes/>
+
+    <!-- System profile of settings. These settings are used by internal processes (Distributed DDL worker and so on). -->
+    <!-- <system_profile>default</system_profile> -->
+
+    <!-- Buffer profile of settings.
+         These settings are used by Buffer storage to flush data to the underlying table.
+         Default: taken from the system_profile directive.
+    -->
+    <!-- <buffer_profile>default</buffer_profile> -->
+
+    <!-- Default database. -->
+    <default_database>default</default_database>
+
+    <!-- Server time zone could be set here.
+
+         Time zone is used when converting between String and DateTime types,
+          when printing DateTime in text formats and parsing DateTime from text,
+          it is used in date and time related functions, if specific time zone was not passed as an argument.
+
+         Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
+         If not specified, system time zone at server startup is used.
+
+         Please note that the server may display a time zone alias instead of the specified name.
+         Example: Zulu is an alias for UTC.
+    -->
+    <!-- <timezone>UTC</timezone> -->
+
+    <!-- You can specify umask here (see "man umask"). The server will apply it on startup.
+         The number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc.; the group can only read).
+    -->
+    <!-- <umask>022</umask> -->
+
+    <!-- Perform mlockall after startup to lower first-query latency
+          and to prevent the clickhouse executable from being paged out under high IO load.
+         Enabling this option is recommended, but it will increase startup time by up to a few seconds.
+    -->
+    <mlock_executable>true</mlock_executable>
+
+    <!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. -->
+    <remap_executable>false</remap_executable>
+
+    <![CDATA[
+         Uncomment below in order to use JDBC table engine and function.
+
+         To install and run JDBC bridge in background:
+         * [Debian/Ubuntu]
+           export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
+           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+           apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
+           clickhouse-jdbc-bridge &
+
+         * [CentOS/RHEL]
+           export MVN_URL=https://repo1.maven.org/maven2/com/clickhouse/clickhouse-jdbc-bridge/
+           export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
+           wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+           yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
+           clickhouse-jdbc-bridge &
+
+         Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
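+
+         A quick way to check that the bridge is up (illustrative; /ping is the bridge's
+         health-check endpoint and 9019 its default port):
+           curl http://127.0.0.1:9019/ping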
+    ]]>
+    <!--
+    <jdbc_bridge>
+        <host>127.0.0.1</host>
+        <port>9019</port>
+    </jdbc_bridge>
+    -->
+
+    <!-- Configuration of clusters that could be used in Distributed tables.
+         https://clickhouse.com/docs/en/operations/table_engines/distributed/
+      -->
+
+    <!-- The list of hosts allowed to be used in URL-related storage engines and table functions.
+        If this section is not present in the configuration, all hosts are allowed.
+    -->
+    <!--<remote_url_allow_hosts>-->
+        <!-- The host should be specified exactly as in the URL. The name is checked before DNS resolution.
+            Example: "clickhouse.com", "clickhouse.com." and "www.clickhouse.com" are different hosts.
+                    If the port is explicitly specified in the URL, the host:port is checked as a whole.
+                    If the host is specified here without a port, any port for this host is allowed:
+                    "clickhouse.com" -> "clickhouse.com:443", "clickhouse.com:80" etc. are allowed, but "clickhouse.com:80" -> only "clickhouse.com:80" is allowed.
+            If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
+            If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
+            Host should be specified using the host xml tag:
+                    <host>clickhouse.com</host>
+        -->
+        <!-- A regular expression can be specified. The RE2 engine is used for regexps.
+            Regexps are not anchored: don't forget to add ^ and $. Also don't forget to escape the dot (.) metacharacter
+            (forgetting to do so is a common source of error).
+        -->
+    <!--</remote_url_allow_hosts>-->
+
+    <!-- If an element has an 'incl' attribute, the corresponding substitution from another file will be used as its value.
+         By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed via the 'include_from' element in the config.
+         Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
+      -->
+
+    <!-- ZooKeeper is used to store metadata about replicas when using Replicated tables.
+         Optional. If you don't use replicated tables, you can omit this.
+
+         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
+      -->
+    <!--
+    <zookeeper>
+        <node>
+            <host>example1</host>
+            <port>2181</port>
+        </node>
+        <node>
+            <host>example2</host>
+            <port>2181</port>
+        </node>
+        <node>
+            <host>example3</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+    -->
+
+    <!-- Substitutions for parameters of replicated tables.
+          Optional. If you don't use replicated tables, you can omit this.
+
+         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
+      -->
+    <!--
+    <macros>
+        <shard>01</shard>
+        <replica>example01-01-1</replica>
+    </macros>
+    -->
+
+    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
+    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
+
+    <!-- Maximum session timeout, in seconds. Default: 3600. -->
+    <max_session_timeout>3600</max_session_timeout>
+
+    <!-- Default session timeout, in seconds. Default: 60. -->
+    <default_session_timeout>60</default_session_timeout>
+
+    <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
+    <!--
+        interval - send every X seconds
+        root_path - prefix for keys
+        hostname_in_path - append hostname to root_path (default = true)
+        metrics - send data from table system.metrics
+        events - send data from table system.events
+        asynchronous_metrics - send data from table system.asynchronous_metrics
+    -->
+    <!--
+    <graphite>
+        <host>localhost</host>
+        <port>42000</port>
+        <timeout>0.1</timeout>
+        <interval>60</interval>
+        <root_path>one_min</root_path>
+        <hostname_in_path>true</hostname_in_path>
+
+        <metrics>true</metrics>
+        <events>true</events>
+        <events_cumulative>false</events_cumulative>
+        <asynchronous_metrics>true</asynchronous_metrics>
+    </graphite>
+    <graphite>
+        <host>localhost</host>
+        <port>42000</port>
+        <timeout>0.1</timeout>
+        <interval>1</interval>
+        <root_path>one_sec</root_path>
+
+        <metrics>true</metrics>
+        <events>true</events>
+        <events_cumulative>false</events_cumulative>
+        <asynchronous_metrics>false</asynchronous_metrics>
+    </graphite>
+    -->
+
+    <!-- Serve endpoint for Prometheus monitoring. -->
+    <!--
+        endpoint - metrics path (relative to root, starting with "/")
+        port - port to set up the server on. If not defined or 0, then http_port is used
+        metrics - send data from table system.metrics
+        events - send data from table system.events
+        asynchronous_metrics - send data from table system.asynchronous_metrics
+        status_info - send data from various ClickHouse components, e.g. dictionaries status
+    -->
+    <!--
+    <prometheus>
+        <endpoint>/metrics</endpoint>
+        <port>9363</port>
+
+        <metrics>true</metrics>
+        <events>true</events>
+        <asynchronous_metrics>true</asynchronous_metrics>
+        <status_info>true</status_info>
+    </prometheus>
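+
+        Once enabled, the endpoint can be smoke-tested with, for example:
+            curl http://localhost:9363/metrics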
+    -->
+
+    <!-- Query log. Used only for queries with setting log_queries = 1. -->
+    <query_log>
+        <!-- What table to insert data into. If the table does not exist, it will be created.
+             When the query log structure is changed after a system update,
+              the old table will be renamed and a new table will be created automatically.
+        -->
+        <database>system</database>
+        <table>query_log</table>
+        <!--
+            PARTITION BY expr: https://clickhouse.com/docs/en/table_engines/mergetree-family/custom_partitioning_key/
+            Example:
+                event_date
+                toMonday(event_date)
+                toYYYYMM(event_date)
+                toStartOfHour(event_time)
+        -->
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <!--
+            Table TTL specification: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl
+            Example:
+                event_date + INTERVAL 1 WEEK
+                event_date + INTERVAL 7 DAY DELETE
+                event_date + INTERVAL 2 WEEK TO DISK 'bbb'
+
+        <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
+        -->
+        <!-- Instead of partition_by, you can provide a full engine expression (starting with ENGINE = ) with parameters.
+             Example: <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
+          -->
+        <!-- Interval of flushing data. -->
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <!-- Example of using a different storage policy for a system table -->
+        <!-- storage_policy>local_ssd</storage_policy -->
+    </query_log>
+
+    <!-- Trace log. Stores stack traces collected by query profilers.
+         See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
+    <trace_log>
+        <database>system</database>
+        <table>trace_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </trace_log>
+
+    <!-- Query thread log. Has information about all threads that participated in query execution.
+         Used only for queries with setting log_query_threads = 1. -->
+    <query_thread_log>
+        <database>system</database>
+        <table>query_thread_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </query_thread_log>
+
+    <!-- Query views log. Has information about all dependent views associated with a query.
+         Used only for queries with setting log_query_views = 1. -->
+    <query_views_log>
+        <database>system</database>
+        <table>query_views_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </query_views_log>
+
+    <!-- Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads). -->
+    <part_log>
+        <database>system</database>
+        <table>part_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </part_log>
+
+    <!-- Uncomment to write the text log into a table.
+         The text log contains all the information from the usual server log but stores it in a structured and efficient way.
+         The level of messages that go to the table can be limited (<level>); if not specified, all messages will go to the table.
+    <text_log>
+        <database>system</database>
+        <table>text_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <level></level>
+    </text_log>
+    -->
+
+    <!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
+    <metric_log>
+        <database>system</database>
+        <table>metric_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+    </metric_log>
+
+    <!--
+        Asynchronous metric log contains values of metrics from
+        system.asynchronous_metrics.
+    -->
+    <asynchronous_metric_log>
+        <database>system</database>
+        <table>asynchronous_metric_log</table>
+        <flush_interval_milliseconds>7000</flush_interval_milliseconds>
+    </asynchronous_metric_log>
+
+    <!--
+        OpenTelemetry log contains OpenTelemetry trace spans.
+    -->
+    <opentelemetry_span_log>
+        <!--
+            The default table creation code is insufficient, this <engine> spec
+            is a workaround. There is no 'event_time' for this log, but two times,
+            start and finish. It is sorted by finish time, to avoid inserting
+            data too far away in the past (probably we can sometimes insert a span
+            that is seconds earlier than the last span in the table, due to a race
+            between several spans inserted in parallel). This gives the spans a
+            global order that we can use to e.g. retry insertion into some external
+            system.
+        -->
+        <engine>
+            engine MergeTree
+            partition by toYYYYMM(finish_date)
+            order by (finish_date, finish_time_us, trace_id)
+        </engine>
+        <database>system</database>
+        <table>opentelemetry_span_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </opentelemetry_span_log>
+
+    <!-- Crash log. Stores stack traces for fatal errors.
+         This table is normally empty. -->
+    <crash_log>
+        <database>system</database>
+        <table>crash_log</table>
+        <partition_by/>
+        <flush_interval_milliseconds>1000</flush_interval_milliseconds>
+    </crash_log>
+
+    <!-- Session log. Stores user login (successful or not) and logout events.
+
+        Note: session log has known security issues and should not be used in production.
+    -->
+    <!-- <session_log>
+        <database>system</database>
+        <table>session_log</table>
+
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </session_log> -->
+
+    <!-- Profiling on the Processors level. -->
+    <processors_profile_log>
+        <database>system</database>
+        <table>processors_profile_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </processors_profile_log>
+
+    <!-- Log of asynchronous inserts. It allows checking the status
+         of insert queries in fire-and-forget mode.
+    -->
+    <asynchronous_insert_log>
+        <database>system</database>
+        <table>asynchronous_insert_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <partition_by>event_date</partition_by>
+        <ttl>event_date + INTERVAL 3 DAY</ttl>
+    </asynchronous_insert_log>
+
+    <!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
+    <!-- Custom TLD lists.
+         Format: <name>/path/to/file</name>
+
+         Changes will not be applied w/o server restart.
+         Path to the list is under top_level_domains_path (see above).
+    -->
+    <top_level_domains_lists>
+        <!--
+        <public_suffix_list>/path/to/public_suffix_list.dat</public_suffix_list>
+        -->
+    </top_level_domains_lists>
+
+    <!-- Configuration of external dictionaries. See:
+         https://clickhouse.com/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts
+    -->
+    <dictionaries_config>*_dictionary.xml</dictionaries_config>
+
+    <!-- Configuration of user defined executable functions -->
+    <user_defined_executable_functions_config>*_function.xml</user_defined_executable_functions_config>
+
+    <!-- Path in ZooKeeper to store user-defined SQL functions created by the command CREATE FUNCTION.
+         If not specified, they will be stored locally. -->
+    <!-- <user_defined_zookeeper_path>/clickhouse/user_defined</user_defined_zookeeper_path> -->
+
+    <!-- Uncomment if you want data to be compressed 30-100% better.
+         Don't do that if you just started using ClickHouse.
+      -->
+    <!--
+    <compression>
+        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
+        <case>
+
+            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
+            <min_part_size>10000000000</min_part_size>        <!- - Min part size in bytes. - ->
+            <min_part_size_ratio>0.01</min_part_size_ratio>   <!- - Min size of part relative to whole table size. - ->
+
+            <!- - What compression method to use. - ->
+            <method>zstd</method>
+        </case>
+    </compression>
+    -->
+
+    <!-- Configuration of encryption. The server executes a command to
+         obtain an encryption key at startup if such a command is
+         defined, or encryption codecs will be disabled otherwise. The
+         command is executed through /bin/sh and is expected to write
+         a Base64-encoded key to stdout. -->
+    <encryption_codecs>
+        <!-- aes_128_gcm_siv -->
+            <!-- Example of getting hex key from env -->
+            <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
+            <!--key_hex from_env="..."></key_hex -->
+
+            <!-- Example of multiple hex keys. They can be imported from env or be written down in config -->
+            <!-- the code should use these keys and throw an exception if their length is not 16 bytes -->
+            <!-- key_hex id="0">...</key_hex -->
+            <!-- key_hex id="1" from_env=".."></key_hex -->
+            <!-- key_hex id="2">...</key_hex -->
+            <!-- current_key_id>2</current_key_id -->
+
+            <!-- Example of getting hex key from config -->
+            <!-- the code should use this key and throw an exception if its length is not 16 bytes -->
+            <!-- key>...</key -->
+
+            <!-- example of adding nonce -->
+            <!-- nonce>...</nonce -->
+        <!-- /aes_128_gcm_siv -->
+    </encryption_codecs>
+
+    <!-- Allow executing distributed DDL queries (CREATE, DROP, ALTER, RENAME) on the cluster.
+         Works only if ZooKeeper is enabled. Comment it out if such functionality isn't required. -->
+    <distributed_ddl>
+        <!-- Path in ZooKeeper to the queue with DDL queries -->
+        <path>/clickhouse/task_queue/ddl</path>
+
+        <!-- Settings from this profile will be used to execute DDL queries -->
+        <!-- <profile>default</profile> -->
+
+        <!-- Controls how many ON CLUSTER queries can run simultaneously. -->
+        <!-- <pool_size>1</pool_size> -->
+
+        <!--
+             Cleanup settings (active tasks will not be removed)
+        -->
+        <!-- Controls task TTL (default 1 week) -->
+        <!-- <task_max_lifetime>604800</task_max_lifetime> -->
+
+        <!-- Controls how often cleanup should be performed (in seconds) -->
+        <!-- <cleanup_delay_period>60</cleanup_delay_period> -->
+
+        <!-- Controls how many tasks could be in the queue -->
+        <!-- <max_tasks_in_queue>1000</max_tasks_in_queue> -->
+    </distributed_ddl>
+
+    <!-- Settings to fine-tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
+    <!--
+    <merge_tree>
+        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
+    </merge_tree>
+    -->
+
+    <!-- Protection from accidental DROP.
+         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
+         If you want to delete one table and don't want to change the clickhouse-server config, you can create the special file <clickhouse-path>/flags/force_drop_table and perform the DROP once.
+         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
+         The same applies to max_partition_size_to_drop.
+         Uncomment to disable protection.
+    -->
+    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
+    <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
+
+    <!-- Example of parameters for the GraphiteMergeTree table engine -->
+    <graphite_rollup_example>
+        <pattern>
+            <regexp>click_cost</regexp>
+            <function>any</function>
+            <retention>
+                <age>0</age>
+                <precision>3600</precision>
+            </retention>
+            <retention>
+                <age>86400</age>
+                <precision>60</precision>
+            </retention>
+        </pattern>
+        <default>
+            <function>max</function>
+            <retention>
+                <age>0</age>
+                <precision>60</precision>
+            </retention>
+            <retention>
+                <age>3600</age>
+                <precision>300</precision>
+            </retention>
+            <retention>
+                <age>86400</age>
+                <precision>3600</precision>
+            </retention>
+        </default>
+    </graphite_rollup_example>
+
+    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
+         The directory will be created if it doesn't exist.
+      -->
+    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
+
+    <!-- Default query masking rules; matching lines will be replaced with something else in the logs
+        (both text logs and system.query_log).
+        name - name for the rule (optional)
+        regexp - RE2 compatible regular expression (mandatory)
+        replace - substitution string for sensitive data (optional, by default - six asterisks)
+    <query_masking_rules>
+        <rule>
+            <name>hide encrypt/decrypt arguments</name>
+            <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
+            <replace>\1(???)</replace>
+        </rule>
+    </query_masking_rules> -->
+
+    <!-- Uncomment to use custom http handlers.
+        Rules are checked from top to bottom; the first match runs the handler.
+            url - to match the request URL; you can use a 'regex:' prefix for regex matching (optional)
+            methods - to match the request method; you can use commas to separate multiple method matches (optional)
+            headers - to match request headers; match each child element (the child element name is the header name); you can use a 'regex:' prefix for regex matching (optional)
+        handler is the request handler
+            type - supported types: static, dynamic_query_handler, predefined_query_handler
+            query - use with predefined_query_handler type; executes the query when the handler is called
+            query_param_name - use with dynamic_query_handler type; extracts and executes the value corresponding to the <query_param_name> value in the HTTP request params
+            status - use with static type; response status code
+            content_type - use with static type; response content-type
+            response_content - use with static type; response content sent to the client. When using the prefix 'file://' or 'config://', the content is read from the file or configuration and sent to the client.
+
+    <http_handlers>
+        <rule>
+            <url>/</url>
+            <methods>POST,GET</methods>
+            <headers><pragma>no-cache</pragma></headers>
+            <handler>
+                <type>dynamic_query_handler</type>
+                <query_param_name>query</query_param_name>
+            </handler>
+        </rule>
+
+        <rule>
+            <url>/predefined_query</url>
+            <methods>POST,GET</methods>
+            <handler>
+                <type>predefined_query_handler</type>
+                <query>SELECT * FROM system.settings</query>
+            </handler>
+        </rule>
+
+        <rule>
+            <handler>
+                <type>static</type>
+                <status>200</status>
+                <content_type>text/plain; charset=UTF-8</content_type>
+                <response_content>config://http_server_default_response</response_content>
+            </handler>
+        </rule>
+    </http_handlers>
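+
+    With the rules above enabled, the predefined handler can be exercised with, for example
+    (8123 is the usual HTTP port; in this file it is taken from CLICKHOUSE_HTTP_PORT):
+        curl 'http://localhost:8123/predefined_query'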
+    -->
+
+    <send_crash_reports>
+        <!-- Changing <enabled> to true allows sending crash reports to -->
+        <!-- the ClickHouse core developers team via Sentry https://sentry.io -->
+        <!-- Doing so at least in pre-production environments is highly appreciated -->
+        <enabled>false</enabled>
+        <!-- Change <anonymize> to true if you don't feel comfortable attaching the server hostname to the crash report -->
+        <anonymize>false</anonymize>
+        <!-- Default endpoint should be changed to a different Sentry DSN only if you have -->
+        <!-- some in-house engineers or hired consultants who're going to debug ClickHouse issues for you -->
+        <endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint>
+    </send_crash_reports>
+
+    <!-- Uncomment to disable ClickHouse internal DNS caching. -->
+    <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
+
+    <!-- You can also configure rocksdb like this: -->
+    <!--
+    <rocksdb>
+        <options>
+            <max_background_jobs>8</max_background_jobs>
+        </options>
+        <column_family_options>
+            <num_levels>2</num_levels>
+        </column_family_options>
+        <tables>
+            <table>
+                <name>TABLE</name>
+                <options>
+                    <max_background_jobs>8</max_background_jobs>
+                </options>
+                <column_family_options>
+                    <num_levels>2</num_levels>
+                </column_family_options>
+            </table>
+        </tables>
+    </rocksdb>
+    -->
+
+    <!-- Configuration for the query cache -->
+    <!-- <query_cache> -->
+    <!--     <max_size>1073741824</max_size> -->
+    <!--     <max_entries>1024</max_entries> -->
+    <!--     <max_entry_size>1048576</max_entry_size> -->
+    <!--     <max_entry_rows>30000000</max_entry_rows> -->
+    <!-- </query_cache> -->
+
+    <!-- Uncomment to enable the merge tree metadata cache -->
+    <!--merge_tree_metadata_cache>
+        <lru_cache_size>268435456</lru_cache_size>
+        <continue_if_corrupted>true</continue_if_corrupted>
+    </merge_tree_metadata_cache-->
+
+    <!-- This allows disabling the exposure of addresses in stack traces for security reasons.
+         Please be aware that it does not improve security much, but makes debugging much harder.
+         Addresses that are small offsets from zero will be displayed nevertheless, to show nullptr dereferences.
+         Regardless of this configuration, the addresses are visible in the system.stack_trace and system.trace_log tables
+         if the user has access to these tables.
+         Changing this setting is not recommended.
+    <show_addresses_in_stack_traces>false</show_addresses_in_stack_traces>
+    -->
+
+    <!-- On Linux systems this can control the behavior of the OOM killer.
+    <oom_score>-1000</oom_score>
+    -->
+
+    <http_port from_env="CLICKHOUSE_HTTP_PORT"/>
+    <tcp_port from_env="CLICKHOUSE_TCP_PORT"/>
+    <mysql_port from_env="CLICKHOUSE_MYSQL_PORT"/>
+    <postgresql_port from_env="CLICKHOUSE_POSTGRESQL_PORT"/>
+    <interserver_http_port from_env="CLICKHOUSE_INTERSERVER_HTTP_PORT"/>
+</clickhouse>

+ 102 - 0
clickhouse_etc/users.xml

@@ -0,0 +1,102 @@
+<?xml version="1.0"?>
+<clickhouse>
+  <!-- See also the files in users.d directory where the settings can be overridden. -->
+  <!-- Profiles of settings. -->
+  <profiles>
+    <!-- Default settings. -->
+    <default>
+    </default>
+    <!-- Profile that allows only read queries. -->
+    <readonly>
+      <readonly>1</readonly>
+    </readonly>
+  </profiles>
+  <!-- Users and ACL. -->
+  <users>
+    <!-- If the user name is not specified, the 'default' user is used. -->
+    <default>
+      <!-- See also the files in users.d directory where the password can be overridden.
+
+                 Password could be specified in plaintext or in SHA256 (in hex format).
+
+                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
+                 Example: <password>qwerty</password>.
+                 Password could be empty.
+
+                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
+                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+                 Restriction of SHA256: you cannot connect to ClickHouse using the MySQL JS client (as of July 2019).
+
+                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
+                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
+
+                 If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
+                  place its name in 'server' element inside 'ldap' element.
+                 Example: <ldap><server>my_ldap_server</server></ldap>
+
+                 If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
+                  place 'kerberos' element instead of 'password' (and similar) elements.
+                 The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
+                 You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
+                  whose initiator's realm matches it.
+                 Example: <kerberos />
+                 Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
+
+                 How to generate a decent password:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 The first line of output is the password, the second the corresponding SHA256.
+
+                 How to generate a double SHA1:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+                 The first line of output is the password, the second the corresponding double SHA1.
+            -->
+      <password from_env="CLICKHOUSE_ADMIN_PASSWORD"/>
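+      <!-- from_env requires CLICKHOUSE_ADMIN_PASSWORD to be present in the server's environment at
+           startup, e.g. (illustrative) via a container 'env' entry or
+           'docker run -e CLICKHOUSE_ADMIN_PASSWORD=...'. -->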
+      <!-- List of networks with open access.
+
+                 To open access from everywhere, specify:
+                    <ip>::/0</ip>
+
+                 To open access only from localhost, specify:
+                    <ip>::1</ip>
+                    <ip>127.0.0.1</ip>
+
+                 Each element of the list has one of the following forms:
+                 <ip> IP address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+                 <host> Hostname. Example: server01.clickhouse.com.
+                     To check access, a DNS query is performed and all received addresses are compared to the peer address.
+                 <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.clickhouse\.com$
+                     To check access, a DNS PTR query is performed for the peer address and then the regexp is applied.
+                     Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
+                     It is strongly recommended that the regexp ends with $.
+                 All results of DNS requests are cached until server restart.
+            -->
+      <networks>
+        <ip>::/0</ip>
+      </networks>
+      <!-- Settings profile for user. -->
+      <profile>default</profile>
+      <!-- Quota for user. -->
+      <quota>default</quota>
+      <!-- User can create other users and grant rights to them. -->
+      <!-- <access_management>1</access_management> -->
+    </default>
+  </users>
+  <!-- Quotas. -->
+  <quotas>
+    <!-- Name of quota. -->
+    <default>
+      <!-- Limits for time interval. You could specify many intervals with different limits. -->
+      <interval>
+        <!-- Length of interval. -->
+        <duration>3600</duration>
+        <!-- No limits. Just calculate resource usage for time interval. -->
+        <queries>0</queries>
+        <errors>0</errors>
+        <result_rows>0</result_rows>
+        <read_rows>0</read_rows>
+        <execution_time>0</execution_time>
+      </interval>
+    </default>
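+    <!-- The zero values above only track usage. To enforce limits, set non-zero values; e.g.
+         (illustrative) <queries>100</queries> allows at most 100 queries per 3600-second interval. -->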
+  </quotas>
+</clickhouse>

+ 24 - 6
deepflow/deepflow-otel-spring-demo.yaml

@@ -73,7 +73,7 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=service.name=item-svc'
+          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -84,6 +84,8 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: status.podIP
+        - name: OTEL_RESOURCE_ATTRIBUTES
+          value: service.name=item-svc,pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -159,7 +161,7 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=service.name=order-svc'
+          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -172,6 +174,8 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: status.podIP
+        - name: OTEL_RESOURCE_ATTRIBUTES
+          value: service.name=order-svc,pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -244,7 +248,7 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=service.name=stock-svc'
+          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -255,6 +259,8 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: status.podIP
+        - name: OTEL_RESOURCE_ATTRIBUTES
+          value: service.name=stock-svc,pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -330,7 +336,7 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=service.name=user-svc'
+          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -343,6 +349,8 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: status.podIP
+        - name: OTEL_RESOURCE_ATTRIBUTES
+          value: service.name=user-svc,pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -418,7 +426,7 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=service.name=shop-web'
+          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -427,6 +435,12 @@ spec:
         ports:
         - containerPort: 8090
         env:
+        - name: SERVICE_HOST
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: OTEL_RESOURCE_ATTRIBUTES
+          value: service.name=shop-web,pod.ip=$(SERVICE_HOST)
         - name: SW_AGENT_NAME
           value: spring-svc-webshop
         - name: OTEL_EXPORTER_OTLP_ENDPOINT
@@ -492,8 +506,12 @@ spec:
               value: web-shop:8090
             - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
               value: http://otel-collector-opentelemetry-collector.observe.svc.cluster.local:4317
+            - name: MY_POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
             - name: OTEL_RESOURCE_ATTRIBUTES
-              value: service.name=loadgenerator
+              value: service.name=loadgenerator,pod.ip=$(MY_POD_IP)
             - name: USERS
               value: '1'
 ##################################################################################################

+ 48 - 0
grafana/templates/quota.yml

@@ -0,0 +1,48 @@
+apiVersion: v1
+kind: List
+items:
+- apiVersion: v1
+  kind: ResourceQuota
+  metadata:
+    name: pods-high
+  spec:
+    hard:
+      cpu: "1000"
+      memory: 200Gi
+      pods: "10"
+    scopeSelector:
+      matchExpressions:
+      - operator: In
+        scopeName: PriorityClass
+        values: ["high"]
+- apiVersion: v1
+  kind: ResourceQuota
+  metadata:
+    name: pods-medium
+  spec:
+    hard:
+      cpu: "10"
+      memory: 20Gi
+      pods: "10"
+    scopeSelector:
+      matchExpressions:
+      - operator: In
+        scopeName: PriorityClass
+        values: ["medium"]
+- apiVersion: v1
+  kind: ResourceQuota
+  metadata:
+    name: pods-low
+  spec:
+    hard:
+      cpu: "5"
+      memory: 10Gi
+      pods: "10"
+    scopeSelector:
+      matchExpressions:
+      - operator: In
+        scopeName: PriorityClass
+        values: ["low"]
+
+
+

+ 7 - 6
grafana/values.yaml

@@ -39,7 +39,7 @@ serviceAccount:
 #    eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
   autoMount: true
 
-replicas: 1
+replicas: 20
 
 ## Create a headless service for the deployment
 headlessService: false
@@ -134,7 +134,7 @@ extraEmptyDirMounts: []
 extraLabels: {}
 
 ## Assign a PriorityClassName to pods if set
-# priorityClassName:
+priorityClassName: pods-high
 
 downloadDashboardsImage:
   repository: curlimages/curl
@@ -255,13 +255,13 @@ ingress:
   #    hosts:
   #      - chart-example.local
 
-resources: {}
+resources:
 #  limits:
 #    cpu: 100m
 #    memory: 128Mi
-#  requests:
-#    cpu: 100m
-#    memory: 128Mi
+  requests:
+    cpu: 500m
+    memory: 10Gi
 
 ## Node labels for pod assignment
 ## ref: https://kubernetes.io/docs/user-guide/node-selection/
@@ -1214,3 +1214,4 @@ extraObjects: []
   #     data:
   #       - key: grafana-admin-password
   #         name: adminPassword
+

+ 21 - 0
kafka/helm/kafka/.helmignore

@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj

+ 9 - 0
kafka/helm/kafka/Chart.lock

@@ -0,0 +1,9 @@
+dependencies:
+- name: zookeeper
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 11.4.2
+- name: common
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 2.4.0
+digest: sha256:d0d3db738ca58fe404cf471499d6cc66827a3480835f4cab0de5053c9684950e
+generated: "2023-06-07T04:12:40.544851481Z"

+ 33 - 0
kafka/helm/kafka/Chart.yaml

@@ -0,0 +1,33 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 3.4.1
+dependencies:
+- condition: zookeeper.enabled
+  name: zookeeper
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 11.x.x
+- name: common
+  repository: oci://registry-1.docker.io/bitnamicharts
+  tags:
+  - bitnami-common
+  version: 2.x.x
+description: Apache Kafka is a distributed streaming platform designed to build real-time
+  pipelines and can be used as a message broker or as a replacement for a log aggregation
+  solution for big data applications.
+home: https://bitnami.com
+icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png
+keywords:
+- kafka
+- zookeeper
+- streaming
+- producer
+- consumer
+maintainers:
+- name: VMware, Inc.
+  url: https://github.com/bitnami/charts
+name: kafka
+sources:
+- https://github.com/bitnami/charts/tree/main/bitnami/kafka
+version: 22.1.5

+ 1073 - 0
kafka/helm/kafka/README.md

@@ -0,0 +1,1073 @@
+<!--- app-name: Apache Kafka -->
+
+# Apache Kafka packaged by Bitnami
+
+Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications.
+
+[Overview of Apache Kafka](http://kafka.apache.org/)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/kafka
+```
+
+## Introduction
+
+This chart bootstraps a [Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/kafka
+```
+
+These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
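+
+Individual parameters can be overridden at install time with `--set`; for example, to change the broker count (`replicaCount` is one of the chart's parameters, see the [Parameters](#parameters) section):
+
+```console
+helm install my-release --set replicaCount=3 oci://registry-1.docker.io/bitnamicharts/kafka
+```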
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name                      | Description                                     | Value |
+| ------------------------- | ----------------------------------------------- | ----- |
+| `global.imageRegistry`    | Global Docker image registry                    | `""`  |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]`  |
+| `global.storageClass`     | Global StorageClass for Persistent Volume(s)    | `""`  |
+
+### Common parameters
+
+| Name                      | Description                                                                             | Value           |
+| ------------------------- | --------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion`             | Override Kubernetes version                                                             | `""`            |
+| `nameOverride`            | String to partially override common.names.fullname                                      | `""`            |
+| `fullnameOverride`        | String to fully override common.names.fullname                                          | `""`            |
+| `clusterDomain`           | Default Kubernetes cluster domain                                                       | `cluster.local` |
+| `commonLabels`            | Labels to add to all deployed objects                                                   | `{}`            |
+| `commonAnnotations`       | Annotations to add to all deployed objects                                              | `{}`            |
+| `extraDeploy`             | Array of extra objects to deploy with the release                                       | `[]`            |
+| `serviceBindings.enabled` | Create secret for service binding (Experimental)                                        | `false`         |
+| `diagnosticMode.enabled`  | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false`         |
+| `diagnosticMode.command`  | Command to override all containers in the statefulset                                   | `["sleep"]`     |
+| `diagnosticMode.args`     | Args to override all containers in the statefulset                                      | `["infinity"]`  |
+
+### Kafka parameters
+
+| Name                                              | Description                                                                                                                                                                         | Value                               |
+| ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- |
+| `image.registry`                                  | Kafka image registry                                                                                                                                                                | `docker.io`                         |
+| `image.repository`                                | Kafka image repository                                                                                                                                                              | `bitnami/kafka`                     |
+| `image.tag`                                       | Kafka image tag (immutable tags are recommended)                                                                                                                                    | `3.4.1-debian-11-r0`                |
+| `image.digest`                                    | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag                                                                               | `""`                                |
+| `image.pullPolicy`                                | Kafka image pull policy                                                                                                                                                             | `IfNotPresent`                      |
+| `image.pullSecrets`                               | Specify docker-registry secret names as an array                                                                                                                                    | `[]`                                |
+| `image.debug`                                     | Specify if debug values should be set                                                                                                                                               | `false`                             |
+| `config`                                          | Configuration file for Kafka. Auto-generated based on other parameters when not specified                                                                                           | `""`                                |
+| `existingConfigmap`                               | ConfigMap with Kafka Configuration                                                                                                                                                  | `""`                                |
+| `log4j`                                           | An optional log4j.properties file to overwrite the default of the Kafka brokers                                                                                                     | `""`                                |
+| `existingLog4jConfigMap`                          | The name of an existing ConfigMap containing a log4j.properties file                                                                                                                | `""`                                |
+| `heapOpts`                                        | Kafka Java Heap size                                                                                                                                                                | `-Xmx1024m -Xms1024m`               |
+| `deleteTopicEnable`                               | Switch to enable topic deletion or not                                                                                                                                              | `false`                             |
+| `autoCreateTopicsEnable`                          | Switch to enable auto creation of topics. Enabling auto creation of topics is not recommended for production or similar environments                                                | `true`                              |
+| `logFlushIntervalMessages`                        | The number of messages to accept before forcing a flush of data to disk                                                                                                             | `_10000`                            |
+| `logFlushIntervalMs`                              | The maximum amount of time a message can sit in a log before we force a flush                                                                                                       | `1000`                              |
+| `logRetentionBytes`                               | A size-based retention policy for logs                                                                                                                                              | `_1073741824`                       |
+| `logRetentionCheckIntervalMs`                     | The interval at which log segments are checked to see if they can be deleted                                                                                                        | `300000`                            |
+| `logRetentionHours`                               | The minimum age of a log file to be eligible for deletion due to age                                                                                                                | `168`                               |
+| `logSegmentBytes`                                 | The maximum size of a log segment file. When this size is reached a new log segment will be created                                                                                 | `_1073741824`                       |
+| `logsDirs`                                        | A comma-separated list of directories in which Kafka's log data is kept                                                                                                             | `/bitnami/kafka/data`               |
+| `maxMessageBytes`                                 | The largest record batch size allowed by Kafka                                                                                                                                      | `_1000012`                          |
+| `defaultReplicationFactor`                        | Default replication factor for automatically created topics                                                                                                                        | `1`                                 |
+| `offsetsTopicReplicationFactor`                   | The replication factor for the offsets topic                                                                                                                                        | `1`                                 |
+| `transactionStateLogReplicationFactor`            | The replication factor for the transaction topic                                                                                                                                    | `1`                                 |
+| `transactionStateLogMinIsr`                       | Overridden min.insync.replicas config for the transaction topic                                                                                                                     | `1`                                 |
+| `numIoThreads`                                    | The number of threads doing disk I/O                                                                                                                                                | `8`                                 |
+| `numNetworkThreads`                               | The number of threads handling network requests                                                                                                                                     | `3`                                 |
+| `numPartitions`                                   | The default number of log partitions per topic                                                                                                                                      | `1`                                 |
+| `numRecoveryThreadsPerDataDir`                    | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown                                                                            | `1`                                 |
+| `socketReceiveBufferBytes`                        | The receive buffer (SO_RCVBUF) used by the socket server                                                                                                                            | `102400`                            |
+| `socketRequestMaxBytes`                           | The maximum size of a request that the socket server will accept (protection against OOM)                                                                                           | `_104857600`                        |
+| `socketSendBufferBytes`                           | The send buffer (SO_SNDBUF) used by the socket server                                                                                                                               | `102400`                            |
+| `zookeeperConnectionTimeoutMs`                    | Timeout in ms for connecting to ZooKeeper                                                                                                                                           | `6000`                              |
+| `zookeeperChrootPath`                             | ZooKeeper chroot path; places Kafka data under this path in the global ZooKeeper namespace                                                                                          | `""`                                |
+| `authorizerClassName`                             | Kafka authorizer class name, e.g. `kafka.security.authorizer.AclAuthorizer` (sets `authorizer.class.name` in server.properties)                                                     | `""`                                |
+| `allowEveryoneIfNoAclFound`                       | Allow access to resources that have no associated ACLs (by default, only super users may access a resource with no ACLs)                                                            | `true`                              |
+| `superUsers`                                      | Super users to add to server.properties (e.g. `User:admin`)                                                                                                                         | `User:admin`                        |
+| `auth.clientProtocol`                             | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls`                                                       | `plaintext`                         |
+| `auth.externalClientProtocol`                     | Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls`  | `""`                                |
+| `auth.interBrokerProtocol`                        | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls`                                                       | `plaintext`                         |
+| `auth.controllerProtocol`                         | Controller protocol. It is used with Kraft mode only.                                                                                                                               | `plaintext`                         |
+| `auth.sasl.mechanisms`                            | SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` |
+| `auth.sasl.interBrokerMechanism`                  | SASL mechanism for inter-broker communication.                                                                                                                                      | `plain`                             |
+| `auth.sasl.jaas.clientUsers`                      | Kafka client user list                                                                                                                                                              | `["user"]`                          |
+| `auth.sasl.jaas.clientPasswords`                  | Kafka client passwords. This is mandatory if more than one user is specified in clientUsers                                                                                         | `[]`                                |
+| `auth.sasl.jaas.interBrokerUser`                  | Kafka inter-broker communication user for SASL authentication                                                                                                                       | `admin`                             |
+| `auth.sasl.jaas.interBrokerPassword`              | Kafka inter-broker communication password for SASL authentication                                                                                                                   | `""`                                |
+| `auth.sasl.jaas.zookeeperUser`                    | Kafka ZooKeeper user for SASL authentication                                                                                                                                        | `""`                                |
+| `auth.sasl.jaas.zookeeperPassword`                | Kafka ZooKeeper password for SASL authentication                                                                                                                                    | `""`                                |
+| `auth.sasl.jaas.existingSecret`                   | Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser                                                                               | `""`                                |
+| `auth.tls.type`                                   | Format to use for TLS certificates. Allowed types: `jks` and `pem`                                                                                                                  | `jks`                               |
+| `auth.tls.pemChainIncluded`                       | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert.                                                                                 | `false`                             |
+| `auth.tls.existingSecrets`                        | Array of existing secrets containing the TLS certificates for the Kafka brokers                                                                                                     | `[]`                                |
+| `auth.tls.autoGenerated`                          | Automatically generate self-signed TLS certificates for Kafka brokers. Currently only supported when `auth.tls.type` is `pem`                                                       | `false`                             |
+| `auth.tls.password`                               | Password to access the JKS files or PEM key when they are password-protected.                                                                                                       | `""`                                |
+| `auth.tls.existingSecret`                         | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`)                                                 | `""`                                |
+| `auth.tls.jksTruststoreSecret`                    | Name of an existing secret containing your truststore, if it is missing from or differs from the ones in `auth.tls.existingSecrets`                                                 | `""`                                |
+| `auth.tls.jksKeystoreSAN`                         | The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate                                                                                   | `""`                                |
+| `auth.tls.jksTruststore`                          | The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore                                                                      | `""`                                |
+| `auth.tls.endpointIdentificationAlgorithm`        | The endpoint identification algorithm to validate server hostname using server certificate                                                                                          | `https`                             |
+| `auth.zookeeper.tls.enabled`                      | Enable TLS for Zookeeper client connections.                                                                                                                                        | `false`                             |
+| `auth.zookeeper.tls.type`                         | Format to use for TLS certificates. Allowed types: `jks` and `pem`.                                                                                                                 | `jks`                               |
+| `auth.zookeeper.tls.verifyHostname`               | Hostname validation.                                                                                                                                                                | `true`                              |
+| `auth.zookeeper.tls.existingSecret`               | Name of the existing secret containing the TLS certificates for ZooKeeper client communications.                                                                                    | `""`                                |
+| `auth.zookeeper.tls.existingSecretKeystoreKey`    | The secret key from the `auth.zookeeper.tls.existingSecret` containing the keystore.                                                                                                | `zookeeper.keystore.jks`            |
+| `auth.zookeeper.tls.existingSecretTruststoreKey`  | The secret key from the `auth.zookeeper.tls.existingSecret` containing the truststore.                                                                                              | `zookeeper.truststore.jks`          |
+| `auth.zookeeper.tls.passwordsSecret`              | Existing secret containing keystore and truststore passwords.                                                                                                                       | `""`                                |
+| `auth.zookeeper.tls.passwordsSecretKeystoreKey`   | The secret key from the `auth.zookeeper.tls.passwordsSecret` containing the password for the keystore.                                                                              | `keystore-password`                 |
+| `auth.zookeeper.tls.passwordsSecretTruststoreKey` | The secret key from the `auth.zookeeper.tls.passwordsSecret` containing the password for the truststore.                                                                            | `truststore-password`               |
+| `listeners`                                       | The address(es) the socket server listens on. Auto-calculated when set to an empty array                                                                                            | `[]`                                |
+| `advertisedListeners`                             | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated when set to an empty array                                                    | `[]`                                |
+| `listenerSecurityProtocolMap`                     | The protocol->listener mapping. Auto-calculated when set to nil                                                                                                                     | `""`                                |
+| `allowPlaintextListener`                          | Allow use of the PLAINTEXT listener                                                                                                                                                 | `true`                              |
+| `interBrokerListenerName`                         | The listener that the brokers should communicate on                                                                                                                                 | `INTERNAL`                          |
+| `command`                                         | Override Kafka container command                                                                                                                                                    | `["/scripts/setup.sh"]`             |
+| `args`                                            | Override Kafka container arguments                                                                                                                                                  | `[]`                                |
+| `extraEnvVars`                                    | Extra environment variables to add to Kafka pods                                                                                                                                    | `[]`                                |
+| `extraEnvVarsCM`                                  | ConfigMap with extra environment variables                                                                                                                                          | `""`                                |
+| `extraEnvVarsSecret`                              | Secret with extra environment variables                                                                                                                                             | `""`                                |
+
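+To tie several of these parameters together, here is a hedged sketch of a values fragment that enables SASL on the client and inter-broker listeners. The user name and password below are placeholders, not chart defaults:
+
+```yaml
+# Illustrative fragment only; credentials are placeholders.
+auth:
+  clientProtocol: sasl
+  interBrokerProtocol: sasl
+  sasl:
+    mechanisms: plain,scram-sha-256,scram-sha-512
+    interBrokerMechanism: plain
+    jaas:
+      clientUsers:
+        - app-user          # placeholder user name
+      clientPasswords:
+        - change-me         # one password per entry in clientUsers
+autoCreateTopicsEnable: false   # auto-creation is discouraged for production
+deleteTopicEnable: true
+```
+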
+### Statefulset parameters
+
+| Name                                                | Description                                                                                                                                                                                   | Value           |
+| --------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `replicaCount`                                      | Number of Kafka nodes                                                                                                                                                                         | `1`             |
+| `minId`                                             | Minimum node.id or broker.id value; each node adds its ordinal index to it                                                                                                                    | `0`             |
+| `brokerRackAssignment`                              | Set broker rack assignment for multi-tenant environments. Allowed values: `aws-az`                                                                                                            | `""`            |
+| `containerPorts.client`                             | Kafka client container port                                                                                                                                                                   | `9092`          |
+| `containerPorts.controller`                         | Kafka Controller listener port. It is used if "kraft.enabled: true"                                                                                                                           | `9093`          |
+| `containerPorts.internal`                           | Kafka inter-broker container port                                                                                                                                                             | `9094`          |
+| `containerPorts.external`                           | Kafka external container port                                                                                                                                                                 | `9095`          |
+| `livenessProbe.enabled`                             | Enable livenessProbe on Kafka containers                                                                                                                                                      | `true`          |
+| `livenessProbe.initialDelaySeconds`                 | Initial delay seconds for livenessProbe                                                                                                                                                       | `10`            |
+| `livenessProbe.periodSeconds`                       | Period seconds for livenessProbe                                                                                                                                                              | `10`            |
+| `livenessProbe.timeoutSeconds`                      | Timeout seconds for livenessProbe                                                                                                                                                             | `5`             |
+| `livenessProbe.failureThreshold`                    | Failure threshold for livenessProbe                                                                                                                                                           | `3`             |
+| `livenessProbe.successThreshold`                    | Success threshold for livenessProbe                                                                                                                                                           | `1`             |
+| `readinessProbe.enabled`                            | Enable readinessProbe on Kafka containers                                                                                                                                                     | `true`          |
+| `readinessProbe.initialDelaySeconds`                | Initial delay seconds for readinessProbe                                                                                                                                                      | `5`             |
+| `readinessProbe.periodSeconds`                      | Period seconds for readinessProbe                                                                                                                                                             | `10`            |
+| `readinessProbe.timeoutSeconds`                     | Timeout seconds for readinessProbe                                                                                                                                                            | `5`             |
+| `readinessProbe.failureThreshold`                   | Failure threshold for readinessProbe                                                                                                                                                          | `6`             |
+| `readinessProbe.successThreshold`                   | Success threshold for readinessProbe                                                                                                                                                          | `1`             |
+| `startupProbe.enabled`                              | Enable startupProbe on Kafka containers                                                                                                                                                       | `false`         |
+| `startupProbe.initialDelaySeconds`                  | Initial delay seconds for startupProbe                                                                                                                                                        | `30`            |
+| `startupProbe.periodSeconds`                        | Period seconds for startupProbe                                                                                                                                                               | `10`            |
+| `startupProbe.timeoutSeconds`                       | Timeout seconds for startupProbe                                                                                                                                                              | `1`             |
+| `startupProbe.failureThreshold`                     | Failure threshold for startupProbe                                                                                                                                                            | `15`            |
+| `startupProbe.successThreshold`                     | Success threshold for startupProbe                                                                                                                                                            | `1`             |
+| `customLivenessProbe`                               | Custom livenessProbe that overrides the default one                                                                                                                                           | `{}`            |
+| `customReadinessProbe`                              | Custom readinessProbe that overrides the default one                                                                                                                                          | `{}`            |
+| `customStartupProbe`                                | Custom startupProbe that overrides the default one                                                                                                                                            | `{}`            |
+| `lifecycleHooks`                                    | lifecycleHooks for the Kafka container to automate configuration before or after startup                                                                                                      | `{}`            |
+| `resources.limits`                                  | The resource limits for the container                                                                                                                                                         | `{}`            |
+| `resources.requests`                                | The requested resources for the container                                                                                                                                                     | `{}`            |
+| `podSecurityContext.enabled`                        | Enable security context for the pods                                                                                                                                                          | `true`          |
+| `podSecurityContext.fsGroup`                        | Set Kafka pod's Security Context fsGroup                                                                                                                                                      | `1001`          |
+| `containerSecurityContext.enabled`                  | Enable Kafka containers' Security Context                                                                                                                                                     | `true`          |
+| `containerSecurityContext.runAsUser`                | Set Kafka containers' Security Context runAsUser                                                                                                                                              | `1001`          |
+| `containerSecurityContext.runAsNonRoot`             | Set Kafka containers' Security Context runAsNonRoot                                                                                                                                           | `true`          |
+| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to run as non-privileged                                                                                                                                              | `false`         |
+| `hostAliases`                                       | Kafka pods host aliases                                                                                                                                                                       | `[]`            |
+| `hostNetwork`                                       | Specify if host network should be enabled for Kafka pods                                                                                                                                      | `false`         |
+| `hostIPC`                                           | Specify if host IPC should be enabled for Kafka pods                                                                                                                                          | `false`         |
+| `podLabels`                                         | Extra labels for Kafka pods                                                                                                                                                                   | `{}`            |
+| `podAnnotations`                                    | Extra annotations for Kafka pods                                                                                                                                                              | `{}`            |
+| `podAffinityPreset`                                 | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                           | `""`            |
+| `podAntiAffinityPreset`                             | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                      | `soft`          |
+| `nodeAffinityPreset.type`                           | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                     | `""`            |
+| `nodeAffinityPreset.key`                            | Node label key to match. Ignored if `affinity` is set.                                                                                                                                        | `""`            |
+| `nodeAffinityPreset.values`                         | Node label values to match. Ignored if `affinity` is set.                                                                                                                                     | `[]`            |
+| `affinity`                                          | Affinity for pod assignment                                                                                                                                                                   | `{}`            |
+| `nodeSelector`                                      | Node labels for pod assignment                                                                                                                                                                | `{}`            |
+| `tolerations`                                       | Tolerations for pod assignment                                                                                                                                                                | `[]`            |
+| `topologySpreadConstraints`                         | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template                                                                      | `[]`            |
+| `terminationGracePeriodSeconds`                     | Seconds the pod needs to gracefully terminate                                                                                                                                                 | `""`            |
+| `podManagementPolicy`                               | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel`      |
+| `priorityClassName`                                 | Name of the existing priority class to be used by kafka pods                                                                                                                                  | `""`            |
+| `schedulerName`                                     | Name of the k8s scheduler (other than default)                                                                                                                                                | `""`            |
+| `updateStrategy.type`                               | Kafka statefulset strategy type                                                                                                                                                               | `RollingUpdate` |
+| `updateStrategy.rollingUpdate`                      | Kafka statefulset rolling update configuration parameters                                                                                                                                     | `{}`            |
+| `extraVolumes`                                      | Optionally specify extra list of additional volumes for the Kafka pod(s)                                                                                                                      | `[]`            |
+| `extraVolumeMounts`                                 | Optionally specify extra list of additional volumeMounts for the Kafka container(s)                                                                                                           | `[]`            |
+| `sidecars`                                          | Add additional sidecar containers to the Kafka pod(s)                                                                                                                                         | `[]`            |
+| `initContainers`                                    | Add additional init containers to the Kafka pod(s)                                                                                                                                            | `[]`            |
+| `pdb.create`                                        | Deploy a pdb object for the Kafka pod                                                                                                                                                         | `false`         |
+| `pdb.minAvailable`                                  | Minimum number/percentage of available Kafka replicas                                                                                                                                         | `""`            |
+| `pdb.maxUnavailable`                                | Maximum number/percentage of unavailable Kafka replicas                                                                                                                                       | `1`             |
+
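+As a sketch (the replica count and resource figures are arbitrary examples, not recommendations), a three-broker deployment with hard anti-affinity and a PodDisruptionBudget could look like:
+
+```yaml
+# Illustrative fragment; sizes are arbitrary examples.
+replicaCount: 3
+podAntiAffinityPreset: hard     # require each broker on a different node
+resources:
+  requests:
+    cpu: 500m
+    memory: 2Gi
+  limits:
+    memory: 3Gi
+pdb:
+  create: true
+  maxUnavailable: 1             # tolerate at most one broker down during voluntary disruptions
+```
+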
+### Traffic Exposure parameters
+
+| Name                                              | Description                                                                                                                               | Value                  |
+| ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
+| `service.type`                                    | Kubernetes Service type                                                                                                                   | `ClusterIP`            |
+| `service.ports.client`                            | Kafka svc port for client connections                                                                                                     | `9092`                 |
+| `service.ports.controller`                        | Kafka svc port for controller connections. It is used if "kraft.enabled: true"                                                            | `9093`                 |
+| `service.ports.internal`                          | Kafka svc port for inter-broker connections                                                                                               | `9094`                 |
+| `service.ports.external`                          | Kafka svc port for external connections                                                                                                   | `9095`                 |
+| `service.nodePorts.client`                        | Node port for the Kafka client connections                                                                                                | `""`                   |
+| `service.nodePorts.external`                      | Node port for the Kafka external connections                                                                                              | `""`                   |
+| `service.sessionAffinity`                         | Control where client requests go, to the same pod or round-robin                                                                          | `None`                 |
+| `service.sessionAffinityConfig`                   | Additional settings for the sessionAffinity                                                                                               | `{}`                   |
+| `service.clusterIP`                               | Kafka service Cluster IP                                                                                                                  | `""`                   |
+| `service.loadBalancerIP`                          | Kafka service Load Balancer IP                                                                                                            | `""`                   |
+| `service.loadBalancerSourceRanges`                | Kafka service Load Balancer sources                                                                                                       | `[]`                   |
+| `service.externalTrafficPolicy`                   | Kafka service external traffic policy                                                                                                     | `Cluster`              |
+| `service.annotations`                             | Additional custom annotations for Kafka service                                                                                           | `{}`                   |
+| `service.headless.publishNotReadyAddresses`       | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready                  | `false`                |
+| `service.headless.annotations`                    | Annotations for the headless service.                                                                                                     | `{}`                   |
+| `service.headless.labels`                         | Labels for the headless service.                                                                                                          | `{}`                   |
+| `service.extraPorts`                              | Extra ports to expose in the Kafka service (normally used with the `sidecar` value)                                                       | `[]`                   |
+| `externalAccess.enabled`                          | Enable Kubernetes external cluster access to Kafka brokers                                                                                | `false`                |
+| `externalAccess.autoDiscovery.enabled`            | Enable using an init container to auto-detect external IPs/ports by querying the K8s API                                                  | `false`                |
+| `externalAccess.autoDiscovery.image.registry`     | Init container auto-discovery image registry                                                                                              | `docker.io`            |
+| `externalAccess.autoDiscovery.image.repository`   | Init container auto-discovery image repository                                                                                            | `bitnami/kubectl`      |
+| `externalAccess.autoDiscovery.image.tag`          | Init container auto-discovery image tag (immutable tags are recommended)                                                                  | `1.25.10-debian-11-r6` |
+| `externalAccess.autoDiscovery.image.digest`       | Kubectl image digest in the form `sha256:aa...`. Please note this parameter, if set, will override the tag                                | `""`                   |
+| `externalAccess.autoDiscovery.image.pullPolicy`   | Init container auto-discovery image pull policy                                                                                           | `IfNotPresent`         |
+| `externalAccess.autoDiscovery.image.pullSecrets`  | Init container auto-discovery image pull secrets                                                                                          | `[]`                   |
+| `externalAccess.autoDiscovery.resources.limits`   | The resource limits for the auto-discovery init container                                                                                 | `{}`                   |
+| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container                                                                             | `{}`                   |
+| `externalAccess.service.type`                     | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP                                                | `LoadBalancer`         |
+| `externalAccess.service.ports.external`           | Kafka port used for external access when service type is LoadBalancer                                                                     | `9094`                 |
+| `externalAccess.service.loadBalancerIPs`          | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount                                                 | `[]`                   |
+| `externalAccess.service.loadBalancerNames`        | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount                                               | `[]`                   |
+| `externalAccess.service.loadBalancerAnnotations`  | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount                                         | `[]`                   |
+| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer                                                                                 | `[]`                   |
+| `externalAccess.service.nodePorts`                | Array of node ports used for each Kafka broker. Length must be the same as replicaCount                                                   | `[]`                   |
+| `externalAccess.service.externalIPs`              | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]`                   |
+| `externalAccess.service.useHostIPs`               | Use service host IPs to configure Kafka external listener when service type is NodePort                                                   | `false`                |
+| `externalAccess.service.usePodIPs`                | Use the `MY_POD_IP` address for external access                                                                                           | `false`                |
+| `externalAccess.service.domain`                   | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP                                | `""`                   |
+| `externalAccess.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready                  | `false`                |
+| `externalAccess.service.labels`                   | Service labels for external access                                                                                                        | `{}`                   |
+| `externalAccess.service.annotations`              | Service annotations for external access                                                                                                   | `{}`                   |
+| `externalAccess.service.extraPorts`               | Extra ports to expose in the Kafka external service                                                                                       | `[]`                   |
+| `networkPolicy.enabled`                           | Specifies whether a NetworkPolicy should be created                                                                                       | `false`                |
+| `networkPolicy.allowExternal`                     | Don't require client label for connections                                                                                                | `true`                 |
+| `networkPolicy.explicitNamespacesSelector`        | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed                                            | `{}`                   |
+| `networkPolicy.externalAccess.from`               | Customize the `from` section for external access on the tcp-external port                                                                 | `[]`                   |
+| `networkPolicy.egressRules.customRules`           | Custom network policy rule                                                                                                                | `{}`                   |
+
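+For example, a sketch that exposes each broker through its own LoadBalancer service and lets the auto-discovery init container pick up the assigned addresses from the Kubernetes API (which is typically why RBAC resources are enabled alongside it):
+
+```yaml
+# Illustrative fragment; autoDiscovery queries the Kubernetes API from an
+# init container, so RBAC resources are enabled together with it.
+externalAccess:
+  enabled: true
+  autoDiscovery:
+    enabled: true
+  service:
+    type: LoadBalancer
+    ports:
+      external: 9094
+rbac:
+  create: true
+```
+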
+### Persistence parameters
+
+| Name                           | Description                                                                                                                            | Value                     |
+| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
+| `persistence.enabled`          | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected                                                 | `true`                    |
+| `persistence.existingClaim`    | A manually managed Persistent Volume and Claim                                                                                         | `""`                      |
+| `persistence.storageClass`     | PVC Storage Class for Kafka data volume                                                                                                | `""`                      |
+| `persistence.accessModes`      | Persistent Volume Access Modes                                                                                                         | `["ReadWriteOnce"]`       |
+| `persistence.size`             | PVC Storage Request for Kafka data volume                                                                                              | `8Gi`                     |
+| `persistence.annotations`      | Annotations for the PVC                                                                                                                | `{}`                      |
+| `persistence.labels`           | Labels for the PVC                                                                                                                     | `{}`                      |
+| `persistence.selector`         | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it     | `{}`                      |
+| `persistence.mountPath`        | Mount path of the Kafka data volume                                                                                                    | `/bitnami/kafka`          |
+| `logPersistence.enabled`       | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected                                                 | `false`                   |
+| `logPersistence.existingClaim` | A manually managed Persistent Volume and Claim                                                                                         | `""`                      |
+| `logPersistence.storageClass`  | PVC Storage Class for Kafka logs volume                                                                                                | `""`                      |
+| `logPersistence.accessModes`   | Persistent Volume Access Modes                                                                                                         | `["ReadWriteOnce"]`       |
+| `logPersistence.size`          | PVC Storage Request for Kafka logs volume                                                                                              | `8Gi`                     |
+| `logPersistence.annotations`   | Annotations for the PVC                                                                                                                | `{}`                      |
+| `logPersistence.selector`      | Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}`                      |
+| `logPersistence.mountPath`     | Mount path of the Kafka logs volume                                                                                                    | `/opt/bitnami/kafka/logs` |
+
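+A minimal sketch sizing the data volume on a custom StorageClass (`fast-ssd` is a placeholder name, and `50Gi` an arbitrary example) while leaving log persistence off:
+
+```yaml
+# Illustrative fragment; "fast-ssd" is a placeholder StorageClass.
+persistence:
+  enabled: true
+  storageClass: "fast-ssd"
+  size: 50Gi                  # arbitrary example size
+logPersistence:
+  enabled: false              # application logs stay on the container filesystem
+```
+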
+### Volume Permissions parameters
+
+| Name                                                   | Description                                                                                                                       | Value                   |
+| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled`                            | Enable init container that changes the owner and group of the persistent volume                                                   | `false`                 |
+| `volumePermissions.image.registry`                     | Init container volume-permissions image registry                                                                                  | `docker.io`             |
+| `volumePermissions.image.repository`                   | Init container volume-permissions image repository                                                                                | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag`                          | Init container volume-permissions image tag (immutable tags are recommended)                                                      | `11-debian-11-r123`     |
+| `volumePermissions.image.digest`                       | Init container volume-permissions image digest in the form `sha256:aa...`. Please note this parameter, if set, will override the tag | `""`                    |
+| `volumePermissions.image.pullPolicy`                   | Init container volume-permissions image pull policy                                                                               | `IfNotPresent`          |
+| `volumePermissions.image.pullSecrets`                  | Init container volume-permissions image pull secrets                                                                              | `[]`                    |
+| `volumePermissions.resources.limits`                   | Init container volume-permissions resource limits                                                                                 | `{}`                    |
+| `volumePermissions.resources.requests`                 | Init container volume-permissions resource requests                                                                               | `{}`                    |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container                                                                                                    | `0`                     |
+
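+If the storage provisioner creates volumes owned by root, a sketch enabling the ownership-fixing init container might be:
+
+```yaml
+# Illustrative fragment; the init container runs as root (runAsUser: 0)
+# to change ownership of the persistent volume before Kafka starts
+# (UID 1001 by default, per containerSecurityContext.runAsUser above).
+volumePermissions:
+  enabled: true
+  containerSecurityContext:
+    runAsUser: 0
+```
+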
+### Other Parameters
+
+| Name                                          | Description                                                                                    | Value   |
+| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- |
+| `serviceAccount.create`                       | Enable creation of ServiceAccount for Kafka pods                                               | `true`  |
+| `serviceAccount.name`                         | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""`    |
+| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created                         | `true`  |
+| `serviceAccount.annotations`                  | Additional custom annotations for the ServiceAccount                                           | `{}`    |
+| `rbac.create`                                 | Whether to create & use RBAC resources or not                                                  | `false` |
+
+### Metrics parameters
+
+| Name                                                        | Description                                                                                                                      | Value                                                                                   |
+| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
+| `metrics.kafka.enabled`                                     | Whether or not to create a standalone Kafka exporter to expose Kafka metrics                                                     | `false`                                                                                 |
+| `metrics.kafka.image.registry`                              | Kafka exporter image registry                                                                                                    | `docker.io`                                                                             |
+| `metrics.kafka.image.repository`                            | Kafka exporter image repository                                                                                                  | `bitnami/kafka-exporter`                                                                |
+| `metrics.kafka.image.tag`                                   | Kafka exporter image tag (immutable tags are recommended)                                                                        | `1.7.0-debian-11-r4`                                                                    |
+| `metrics.kafka.image.digest`                                | Kafka exporter image digest in the format `sha256:aa...`. If set, this parameter overrides the tag                               | `""`                                                                                    |
+| `metrics.kafka.image.pullPolicy`                            | Kafka exporter image pull policy                                                                                                 | `IfNotPresent`                                                                          |
+| `metrics.kafka.image.pullSecrets`                           | Specify docker-registry secret names as an array                                                                                 | `[]`                                                                                    |
+| `metrics.kafka.certificatesSecret`                          | Name of the existing secret containing the optional certificate and key files                                                    | `""`                                                                                    |
+| `metrics.kafka.tlsCert`                                     | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file)                           | `cert-file`                                                                             |
+| `metrics.kafka.tlsKey`                                      | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file)                             | `key-file`                                                                              |
+| `metrics.kafka.tlsCaSecret`                                 | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication                      | `""`                                                                                    |
+| `metrics.kafka.tlsCaCert`                                   | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file)                  | `ca-file`                                                                               |
+| `metrics.kafka.extraFlags`                                  | Extra flags to be passed to Kafka exporter                                                                                       | `{}`                                                                                    |
+| `metrics.kafka.command`                                     | Override Kafka exporter container command                                                                                        | `[]`                                                                                    |
+| `metrics.kafka.args`                                        | Override Kafka exporter container arguments                                                                                      | `[]`                                                                                    |
+| `metrics.kafka.containerPorts.metrics`                      | Kafka exporter metrics container port                                                                                            | `9308`                                                                                  |
+| `metrics.kafka.resources.limits`                            | The resources limits for the container                                                                                           | `{}`                                                                                    |
+| `metrics.kafka.resources.requests`                          | The requested resources for the container                                                                                        | `{}`                                                                                    |
+| `metrics.kafka.podSecurityContext.enabled`                  | Enable security context for the pods                                                                                             | `true`                                                                                  |
+| `metrics.kafka.podSecurityContext.fsGroup`                  | Set Kafka exporter pod's Security Context fsGroup                                                                                | `1001`                                                                                  |
+| `metrics.kafka.containerSecurityContext.enabled`            | Enable Kafka exporter containers' Security Context                                                                               | `true`                                                                                  |
+| `metrics.kafka.containerSecurityContext.runAsUser`          | Set Kafka exporter containers' Security Context runAsUser                                                                        | `1001`                                                                                  |
+| `metrics.kafka.containerSecurityContext.runAsNonRoot`       | Set Kafka exporter containers' Security Context runAsNonRoot                                                                     | `true`                                                                                  |
+| `metrics.kafka.hostAliases`                                 | Kafka exporter pods host aliases                                                                                                 | `[]`                                                                                    |
+| `metrics.kafka.podLabels`                                   | Extra labels for Kafka exporter pods                                                                                             | `{}`                                                                                    |
+| `metrics.kafka.podAnnotations`                              | Extra annotations for Kafka exporter pods                                                                                        | `{}`                                                                                    |
+| `metrics.kafka.podAffinityPreset`                           | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`                                | `""`                                                                                    |
+| `metrics.kafka.podAntiAffinityPreset`                       | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`                           | `soft`                                                                                  |
+| `metrics.kafka.nodeAffinityPreset.type`                     | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`                          | `""`                                                                                    |
+| `metrics.kafka.nodeAffinityPreset.key`                      | Node label key to match. Ignored if `metrics.kafka.affinity` is set.                                                             | `""`                                                                                    |
+| `metrics.kafka.nodeAffinityPreset.values`                   | Node label values to match. Ignored if `metrics.kafka.affinity` is set.                                                          | `[]`                                                                                    |
+| `metrics.kafka.affinity`                                    | Affinity for pod assignment                                                                                                      | `{}`                                                                                    |
+| `metrics.kafka.nodeSelector`                                | Node labels for pod assignment                                                                                                   | `{}`                                                                                    |
+| `metrics.kafka.tolerations`                                 | Tolerations for pod assignment                                                                                                   | `[]`                                                                                    |
+| `metrics.kafka.schedulerName`                               | Name of the k8s scheduler (other than default) for Kafka exporter                                                                | `""`                                                                                    |
+| `metrics.kafka.priorityClassName`                           | Kafka exporter pods' priorityClassName                                                                                           | `""`                                                                                    |
+| `metrics.kafka.topologySpreadConstraints`                   | Topology Spread Constraints for pod assignment                                                                                   | `[]`                                                                                    |
+| `metrics.kafka.extraVolumes`                                | Optionally specify extra list of additional volumes for the Kafka exporter pod(s)                                                | `[]`                                                                                    |
+| `metrics.kafka.extraVolumeMounts`                           | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s)                                     | `[]`                                                                                    |
+| `metrics.kafka.sidecars`                                    | Add additional sidecar containers to the Kafka exporter pod(s)                                                                   | `[]`                                                                                    |
+| `metrics.kafka.initContainers`                              | Add init containers to the Kafka exporter pods                                                                                   | `[]`                                                                                    |
+| `metrics.kafka.service.ports.metrics`                       | Kafka exporter metrics service port                                                                                              | `9308`                                                                                  |
+| `metrics.kafka.service.clusterIP`                           | Static clusterIP or None for headless services                                                                                   | `""`                                                                                    |
+| `metrics.kafka.service.sessionAffinity`                     | Control where client requests go, to the same pod or round-robin                                                                 | `None`                                                                                  |
+| `metrics.kafka.service.annotations`                         | Annotations for the Kafka exporter service                                                                                       | `{}`                                                                                    |
+| `metrics.kafka.serviceAccount.create`                       | Enable creation of ServiceAccount for Kafka exporter pods                                                                        | `true`                                                                                  |
+| `metrics.kafka.serviceAccount.name`                         | The name of the service account to use. If not set and `create` is `true`, a name is generated                                   | `""`                                                                                    |
+| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created                                                           | `true`                                                                                  |
+| `metrics.jmx.enabled`                                       | Whether or not to expose JMX metrics to Prometheus                                                                               | `false`                                                                                 |
+| `metrics.jmx.image.registry`                                | JMX exporter image registry                                                                                                      | `docker.io`                                                                             |
+| `metrics.jmx.image.repository`                              | JMX exporter image repository                                                                                                    | `bitnami/jmx-exporter`                                                                  |
+| `metrics.jmx.image.tag`                                     | JMX exporter image tag (immutable tags are recommended)                                                                          | `0.18.0-debian-11-r27`                                                                  |
+| `metrics.jmx.image.digest`                                  | JMX exporter image digest in the format `sha256:aa...`. If set, this parameter overrides the tag                                 | `""`                                                                                    |
+| `metrics.jmx.image.pullPolicy`                              | JMX exporter image pull policy                                                                                                   | `IfNotPresent`                                                                          |
+| `metrics.jmx.image.pullSecrets`                             | Specify docker-registry secret names as an array                                                                                 | `[]`                                                                                    |
+| `metrics.jmx.containerSecurityContext.enabled`              | Enable Prometheus JMX exporter containers' Security Context                                                                      | `true`                                                                                  |
+| `metrics.jmx.containerSecurityContext.runAsUser`            | Set Prometheus JMX exporter containers' Security Context runAsUser                                                               | `1001`                                                                                  |
+| `metrics.jmx.containerSecurityContext.runAsNonRoot`         | Set Prometheus JMX exporter containers' Security Context runAsNonRoot                                                            | `true`                                                                                  |
+| `metrics.jmx.containerPorts.metrics`                        | Prometheus JMX exporter metrics container port                                                                                   | `5556`                                                                                  |
+| `metrics.jmx.resources.limits`                              | The resources limits for the JMX exporter container                                                                              | `{}`                                                                                    |
+| `metrics.jmx.resources.requests`                            | The requested resources for the JMX exporter container                                                                           | `{}`                                                                                    |
+| `metrics.jmx.service.ports.metrics`                         | Prometheus JMX exporter metrics service port                                                                                     | `5556`                                                                                  |
+| `metrics.jmx.service.clusterIP`                             | Static clusterIP or None for headless services                                                                                   | `""`                                                                                    |
+| `metrics.jmx.service.sessionAffinity`                       | Control where client requests go, to the same pod or round-robin                                                                 | `None`                                                                                  |
+| `metrics.jmx.service.annotations`                           | Annotations for the Prometheus JMX exporter service                                                                              | `{}`                                                                                    |
+| `metrics.jmx.whitelistObjectNames`                          | Allows setting which JMX objects to expose via JMX stats to the JMX exporter                                                     | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` |
+| `metrics.jmx.config`                                        | Configuration file for JMX exporter                                                                                              | `""`                                                                                    |
+| `metrics.jmx.existingConfigmap`                             | Name of existing ConfigMap with JMX exporter configuration                                                                       | `""`                                                                                    |
+| `metrics.jmx.extraRules`                                    | Add extra rules to JMX exporter configuration                                                                                    | `""`                                                                                    |
+| `metrics.serviceMonitor.enabled`                            | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false`                                                                                 |
+| `metrics.serviceMonitor.namespace`                          | Namespace in which Prometheus is running                                                                                         | `""`                                                                                    |
+| `metrics.serviceMonitor.interval`                           | Interval at which metrics should be scraped                                                                                      | `""`                                                                                    |
+| `metrics.serviceMonitor.scrapeTimeout`                      | Timeout after which the scrape is ended                                                                                          | `""`                                                                                    |
+| `metrics.serviceMonitor.labels`                             | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus                                            | `{}`                                                                                    |
+| `metrics.serviceMonitor.selector`                           | Prometheus instance selector labels                                                                                              | `{}`                                                                                    |
+| `metrics.serviceMonitor.relabelings`                        | RelabelConfigs to apply to samples before scraping                                                                               | `[]`                                                                                    |
+| `metrics.serviceMonitor.metricRelabelings`                  | MetricRelabelConfigs to apply to samples before ingestion                                                                        | `[]`                                                                                    |
+| `metrics.serviceMonitor.honorLabels`                        | Specify honorLabels parameter to add the scrape endpoint                                                                         | `false`                                                                                 |
+| `metrics.serviceMonitor.jobLabel`                           | The name of the label on the target service to use as the job name in prometheus.                                                | `""`                                                                                    |
+| `metrics.prometheusRule.enabled`                            | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false`                                                                                 |
+| `metrics.prometheusRule.namespace`                          | Namespace in which Prometheus is running                                                                                         | `""`                                                                                    |
+| `metrics.prometheusRule.labels`                             | Additional labels that can be used so PrometheusRule will be discovered by Prometheus                                            | `{}`                                                                                    |
+| `metrics.prometheusRule.groups`                             | Prometheus Rule Groups for Kafka                                                                                                 | `[]`                                                                                    |
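+
+For example, a values sketch that enables the JMX exporter and a Prometheus Operator `ServiceMonitor`; the `release: prometheus` selector label is an assumption about your Prometheus installation:
+
+```yaml
+metrics:
+  jmx:
+    enabled: true           # expose JMX metrics to Prometheus
+  serviceMonitor:
+    enabled: true           # requires metrics.kafka.enabled or metrics.jmx.enabled
+    labels:
+      release: prometheus   # assumed label matched by your Prometheus instance selector
+```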
+
+### Kafka provisioning parameters
+
+| Name                                                       | Description                                                                                                                   | Value                 |
+| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------- |
+| `provisioning.enabled`                                     | Enable kafka provisioning Job                                                                                                 | `false`               |
+| `provisioning.numPartitions`                               | Default number of partitions for topics when unspecified                                                                      | `1`                   |
+| `provisioning.replicationFactor`                           | Default replication factor for topics when unspecified                                                                        | `1`                   |
+| `provisioning.topics`                                      | Kafka topics to provision                                                                                                     | `[]`                  |
+| `provisioning.nodeSelector`                                | Node labels for pod assignment                                                                                                | `{}`                  |
+| `provisioning.tolerations`                                 | Tolerations for pod assignment                                                                                                | `[]`                  |
+| `provisioning.extraProvisioningCommands`                   | Extra commands to run to provision cluster resources                                                                          | `[]`                  |
+| `provisioning.parallel`                                    | Number of provisioning commands to run at the same time                                                                       | `1`                   |
+| `provisioning.preScript`                                   | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations   | `""`                  |
+| `provisioning.postScript`                                  | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations    | `""`                  |
+| `provisioning.auth.tls.type`                               | Format to use for TLS certificates. Allowed types: `jks` and `pem`.                                                           | `jks`                 |
+| `provisioning.auth.tls.certificatesSecret`                 | Existing secret containing the TLS certificates for the Kafka provisioning Job.                                               | `""`                  |
+| `provisioning.auth.tls.cert`                               | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt)                                 | `tls.crt`             |
+| `provisioning.auth.tls.key`                                | The secret key from the certificatesSecret if 'key' key different from the default (tls.key)                                  | `tls.key`             |
+| `provisioning.auth.tls.caCert`                             | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt)                                | `ca.crt`              |
+| `provisioning.auth.tls.keystore`                           | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks)                        | `keystore.jks`        |
+| `provisioning.auth.tls.truststore`                         | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks)                    | `truststore.jks`      |
+| `provisioning.auth.tls.passwordsSecret`                    | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected.                  | `""`                  |
+| `provisioning.auth.tls.keyPasswordSecretKey`               | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password)               | `key-password`        |
+| `provisioning.auth.tls.keystorePasswordSecretKey`          | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password)     | `keystore-password`   |
+| `provisioning.auth.tls.truststorePasswordSecretKey`        | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` |
+| `provisioning.auth.tls.keyPassword`                        | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided.                     | `""`                  |
+| `provisioning.auth.tls.keystorePassword`                   | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided.                                                | `""`                  |
+| `provisioning.auth.tls.truststorePassword`                 | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided.                                              | `""`                  |
+| `provisioning.command`                                     | Override provisioning container command                                                                                       | `[]`                  |
+| `provisioning.args`                                        | Override provisioning container arguments                                                                                     | `[]`                  |
+| `provisioning.extraEnvVars`                                | Extra environment variables to add to the provisioning pod                                                                    | `[]`                  |
+| `provisioning.extraEnvVarsCM`                              | ConfigMap with extra environment variables                                                                                    | `""`                  |
+| `provisioning.extraEnvVarsSecret`                          | Secret with extra environment variables                                                                                       | `""`                  |
+| `provisioning.podAnnotations`                              | Extra annotations for Kafka provisioning pods                                                                                 | `{}`                  |
+| `provisioning.podLabels`                                   | Extra labels for Kafka provisioning pods                                                                                      | `{}`                  |
+| `provisioning.serviceAccount.create`                       | Enable creation of ServiceAccount for Kafka provisioning pods                                                                 | `false`               |
+| `provisioning.serviceAccount.name`                         | The name of the service account to use. If not set and `create` is `true`, a name is generated                                | `""`                  |
+| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created                                                        | `true`                |
+| `provisioning.resources.limits`                            | The resources limits for the Kafka provisioning container                                                                     | `{}`                  |
+| `provisioning.resources.requests`                          | The requested resources for the Kafka provisioning container                                                                  | `{}`                  |
+| `provisioning.podSecurityContext.enabled`                  | Enable security context for the pods                                                                                          | `true`                |
+| `provisioning.podSecurityContext.fsGroup`                  | Set Kafka provisioning pod's Security Context fsGroup                                                                         | `1001`                |
+| `provisioning.containerSecurityContext.enabled`            | Enable Kafka provisioning containers' Security Context                                                                        | `true`                |
+| `provisioning.containerSecurityContext.runAsUser`          | Set Kafka provisioning containers' Security Context runAsUser                                                                 | `1001`                |
+| `provisioning.containerSecurityContext.runAsNonRoot`       | Set Kafka provisioning containers' Security Context runAsNonRoot                                                              | `true`                |
+| `provisioning.schedulerName`                               | Name of the k8s scheduler (other than default) for kafka provisioning                                                         | `""`                  |
+| `provisioning.extraVolumes`                                | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s)                                         | `[]`                  |
+| `provisioning.extraVolumeMounts`                           | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s)                              | `[]`                  |
+| `provisioning.sidecars`                                    | Add additional sidecar containers to the Kafka provisioning pod(s)                                                            | `[]`                  |
+| `provisioning.initContainers`                              | Add init containers to the Kafka provisioning pod(s)                                                                          | `[]`                  |
+| `provisioning.waitForKafka`                                | If true use an init container to wait until kafka is ready before starting provisioning                                       | `true`                |
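+
+For example, a sketch that provisions a topic at install time. The topic fields shown are assumptions; check [values.yaml](values.yaml) for the exact schema expected under `provisioning.topics`:
+
+```yaml
+provisioning:
+  enabled: true
+  numPartitions: 1        # default partitions for topics that do not specify them
+  replicationFactor: 1    # default replication factor for topics that do not specify it
+  topics:
+    - name: my-topic      # hypothetical topic name
+      partitions: 3
+      replicationFactor: 1
+```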
+
+### Kraft chart parameters
+
+| Name                            | Description                                                                             | Value                    |
+| ------------------------------- | --------------------------------------------------------------------------------------- | ------------------------ |
+| `kraft.enabled`                 | Switch to enable or disable the Kraft mode for Kafka                                    | `true`                   |
+| `kraft.processRoles`            | Roles of your Kafka nodes. Nodes can have the 'broker' role, the 'controller' role, or both.  | `broker,controller`      |
+| `kraft.controllerListenerNames` | Controller listener names                                                               | `CONTROLLER`             |
+| `kraft.clusterId`               | Kafka ClusterID. You must set it if your cluster contains more than one node.           | `kafka_cluster_id_test1` |
+| `kraft.controllerQuorumVoters`  | Quorum voters of Kafka Kraft cluster. Use it for nodes with 'broker' role only.         | `""`                     |
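+
+For example, a minimal values sketch for a Kraft deployment; note that, per the ZooKeeper parameters below, the ZooKeeper subchart must be disabled in Kraft mode:
+
+```yaml
+kraft:
+  enabled: true
+  clusterId: "my_kafka_cluster_id"   # assumed ID; must be set if the cluster has more than one node
+zookeeper:
+  enabled: false                     # must be false when Kraft mode is enabled
+```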
+
+### ZooKeeper chart parameters
+
+| Name                                    | Description                                                                                                                                                             | Value               |
+| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
+| `zookeeper.enabled`                     | Switch to enable or disable the ZooKeeper helm chart. Must be false if you use Kraft mode.                                                                              | `false`             |
+| `zookeeper.replicaCount`                | Number of ZooKeeper nodes                                                                                                                                               | `1`                 |
+| `zookeeper.auth.client.enabled`         | Enable ZooKeeper auth                                                                                                                                                   | `false`             |
+| `zookeeper.auth.client.clientUser`      | User that ZooKeeper clients will use to authenticate                                                                                                                    | `""`                |
+| `zookeeper.auth.client.clientPassword`  | Password that ZooKeeper clients will use to authenticate                                                                                                                | `""`                |
+| `zookeeper.auth.client.serverUsers`     | Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"                                        | `""`                |
+| `zookeeper.auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""`                |
+| `zookeeper.persistence.enabled`         | Enable persistence on ZooKeeper using PVC(s)                                                                                                                            | `true`              |
+| `zookeeper.persistence.storageClass`    | Persistent Volume storage class                                                                                                                                         | `""`                |
+| `zookeeper.persistence.accessModes`     | Persistent Volume access modes                                                                                                                                          | `["ReadWriteOnce"]` |
+| `zookeeper.persistence.size`            | Persistent Volume size                                                                                                                                                  | `8Gi`               |
+| `externalZookeeper.servers`             | List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. Must be empty if you use Kraft mode.                               | `[]`                |
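+
+Conversely, a sketch for a ZooKeeper-based deployment, where Kraft mode must be disabled:
+
+```yaml
+kraft:
+  enabled: false     # Kraft and ZooKeeper modes are mutually exclusive
+zookeeper:
+  enabled: true
+  replicaCount: 3    # example ensemble size
+```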
+
+```console
+helm install my-release \
+  --set replicaCount=3 \
+  oci://registry-1.docker.io/bitnamicharts/kafka
+```
+
+The above command deploys Kafka with 3 brokers (replicas).
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/kafka
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami releases a new chart, updating its containers, whenever a new version of the main container is available, significant changes are introduced, or critical vulnerabilities are fixed.
+
+### Setting custom parameters
+
+Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property.
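+
+For instance, a minimal sketch that sets `background.threads` through `extraEnvVars`:
+
+```yaml
+extraEnvVars:
+  - name: KAFKA_CFG_BACKGROUND_THREADS   # maps to the background.threads Kafka key
+    value: "10"                          # illustrative value
+```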
+
+Using `extraEnvVars` with `KAFKA_CFG_` is the preferred and simplest way to add custom Kafka parameters not otherwise specified in this chart. Alternatively, you can provide a *full* Kafka configuration using `config` or `existingConfigmap`.
+Setting either `config` or `existingConfigmap` will cause the chart to disregard `KAFKA_CFG_` settings, which are used by many other Kafka-related chart values described above, as well as dynamically generated parameters such as `zookeeper.connect`. This can cause unexpected behavior.
+
+### Listeners configuration
+
+This chart allows you to automatically configure Kafka with 3 listeners:
+
+- One for inter-broker communications.
+- A second one for communications with clients within the K8s cluster.
+- (Optional) A third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-cluster) for more information.
+
+For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed.
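+
+As an illustrative sketch only (the listener names and ports are placeholders, and the exact value format the chart expects should be checked against [values.yaml](values.yaml)):
+
+```yaml
+listeners: "INTERNAL://:9093,CLIENT://:9092"             # placeholder listeners
+advertisedListeners: "INTERNAL://:9093,CLIENT://:9092"   # placeholder advertised listeners
+listenerSecurityProtocolMap: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT"
+```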
+
+### Enable security for Kafka and Zookeeper
+
+You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. This table shows the available protocols and the security they provide:
+
+| Method    | Authentication               | Encryption via TLS |
+|-----------|------------------------------|--------------------|
+| plaintext | None                         | No                 |
+| tls       | None                         | Yes                |
+| mtls      | Yes (two-way authentication) | Yes                |
+| sasl      | Yes (via SASL)               | No                 |
+| sasl_tls  | Yes (via SASL)               | Yes                |
+
+Learn more about how to configure Kafka to use the different authentication protocols in the [chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/kafka/administration/enable-security/).
+
+If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below:
+
+- `auth.sasl.jaas.clientUsers`/`auth.sasl.jaas.clientPasswords`: when enabling SASL authentication for communications with clients.
+- `auth.sasl.jaas.interBrokerUser`/`auth.sasl.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications.
+- `auth.sasl.jaas.zookeeperUser`/`auth.sasl.jaas.zookeeperPassword`: when the ZooKeeper chart is deployed with SASL authentication enabled.
+
+In order to configure TLS authentication/encryption, you **can** create a secret per Kafka broker you have in the cluster containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and the keystore (`kafka.keystore.jks`). Then, pass the secret names with the `auth.tls.existingSecrets` parameter when deploying the chart.
+
+> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.tls.password` parameter to provide your password.
+
+For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers, use the commands below to create the secrets:
+
+```console
+kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks
+kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks
+```
+
+> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you generate the JKS files.
+
+If, for some reason (like using Cert-Manager), you cannot use the default JKS secret scheme, you can use these additional parameters:
+
+- `auth.tls.jksTruststoreSecret` to define an additional secret where the `kafka.truststore.jks` file is kept. The truststore password **must** be the same as in `auth.tls.password`.
+- `auth.tls.jksTruststore` to overwrite the default value of the truststore key (`kafka.truststore.jks`).
+- `auth.tls.jksKeystoreSAN` if you want to use a SAN certificate for your brokers. Setting this parameter means the chart expects an existing key in the `auth.tls.jksTruststoreSecret` named after the `auth.tls.jksKeystoreSAN` value, and will use it as the keystore for **all** brokers.
+
+> **Note**: If you are using cert-manager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that cert-manager creates. To handle this, the `auth.tls.pemChainIncluded` property can be set to `true` and the initContainer created by this chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain).
+> **Note**: The truststore/keystore from above **must** be protected with the same password as in `auth.tls.password`.
+
+You can deploy the chart with authentication using the following parameters:
+
+```console
+replicaCount=2
+auth.clientProtocol=sasl
+auth.interBrokerProtocol=tls
+auth.tls.existingSecrets[0]=kafka-jks-0
+auth.tls.existingSecrets[1]=kafka-jks-1
+auth.tls.password=jksPassword
+auth.sasl.jaas.clientUsers[0]=brokerUser
+auth.sasl.jaas.clientPasswords[0]=brokerPassword
+auth.sasl.jaas.zookeeperUser=zookeeperUser
+auth.sasl.jaas.zookeeperPassword=zookeeperPassword
+zookeeper.auth.enabled=true
+zookeeper.auth.serverUsers=zookeeperUser
+zookeeper.auth.serverPasswords=zookeeperPassword
+zookeeper.auth.clientUser=zookeeperUser
+zookeeper.auth.clientPassword=zookeeperPassword
+```
+
+You can deploy the chart with AclAuthorizer using the following parameters:
+
+```console
+replicaCount=2
+auth.clientProtocol=sasl
+auth.interBrokerProtocol=sasl_tls
+auth.tls.existingSecrets[0]=kafka-jks-0
+auth.tls.existingSecrets[1]=kafka-jks-1
+auth.tls.password=jksPassword
+auth.sasl.jaas.clientUsers[0]=brokerUser
+auth.sasl.jaas.clientPasswords[0]=brokerPassword
+auth.sasl.jaas.zookeeperUser=zookeeperUser
+auth.sasl.jaas.zookeeperPassword=zookeeperPassword
+zookeeper.auth.enabled=true
+zookeeper.auth.serverUsers=zookeeperUser
+zookeeper.auth.serverPasswords=zookeeperPassword
+zookeeper.auth.clientUser=zookeeperUser
+zookeeper.auth.clientPassword=zookeeperPassword
+authorizerClassName=kafka.security.authorizer.AclAuthorizer
+allowEveryoneIfNoAclFound=false
+superUsers=User:admin
+```
+
+If you are using Kafka ACLs, you might encounter the following event in `kafka-authorizer.log`: `[...] Principal = User:ANONYMOUS is Allowed Operation [...]`.
+
+Setting the parameter `auth.clientProtocol=mtls` configures Kafka with `ssl.client.auth=required`. This option requires clients to authenticate to the Kafka brokers.
+
+As a result, the events in `kafka-authorizer.log` will show a specific subject: `[...] Principal = User:CN=kafka,OU=...,O=...,L=...,C=..,ST=... is [...]`.
+
+If you also enable exposing metrics using the Kafka exporter, and you are using the `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the brokers' certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.kafka.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags:
+
+```console
+metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""}
+```
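+
+The same flag expressed in a values file; an equivalent YAML sketch:
+
+```yaml
+metrics:
+  kafka:
+    enabled: true
+    extraFlags:
+      tls.insecure-skip-tls-verify: ""   # skips TLS validation in the exporter
+```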
+
+### Accessing Kafka brokers from outside the cluster
+
+In order to access Kafka brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per Kafka pod will be created.
+
+There are three ways of configuring external access: using LoadBalancer services, using NodePort services, or using ClusterIP services.
+
+#### Using LoadBalancer services
+
+You have two alternatives to use LoadBalancer services:
+
+- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically.
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.ports.external=9094
+externalAccess.autoDiscovery.enabled=true
+serviceAccount.create=true
+rbac.create=true
+```
+
+Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the load balancer IPs:
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.ports.external=9094
+externalAccess.service.loadBalancerIPs[0]='external-ip-1'
+externalAccess.service.loadBalancerIPs[1]='external-ip-2'
+```
+
+Note: You need to know in advance the load balancer IPs, so each Kafka broker advertised listener can be configured accordingly.
+
+Following the aforementioned steps will also allow you to connect to the brokers from outside the cluster using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the `service.externalPort` property to specify the port used for external connections.
+
+#### Using NodePort services
+
+You have three alternatives to use NodePort services:
+
+- Option A) Use random node ports using an **initContainer** that discovers them automatically.
+
+  ```console
+  externalAccess.enabled=true
+  externalAccess.service.type=NodePort
+  externalAccess.autoDiscovery.enabled=true
+  serviceAccount.create=true
+  rbac.create=true
+  ```
+
+  Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the node ports:
+
+  ```console
+  externalAccess.enabled=true
+  externalAccess.service.type=NodePort
+  externalAccess.service.nodePorts[0]='node-port-1'
+  externalAccess.service.nodePorts[1]='node-port-2'
+  ```
+
+  Note: You need to know in advance the node ports that will be exposed, so each Kafka broker advertised listener can be configured accordingly.
+
+  The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` or `externalAccess.service.useHostIPs` is provided.
+
+- Option C) Manually specify distinct external IPs
+
+  ```console
+  externalAccess.enabled=true
+  externalAccess.service.type=NodePort
+  externalAccess.service.externalIPs[0]='172.16.0.20'
+  externalAccess.service.externalIPs[1]='172.16.0.21'
+  externalAccess.service.externalIPs[2]='172.16.0.22'
+  ```
+
+  Note: You need to know in advance the available IPs of your cluster that will be exposed, so each Kafka broker advertised listener can be configured accordingly.
+
+#### Using ClusterIP services
+
+Note: This option requires that an ingress is deployed within your cluster
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=ClusterIP
+externalAccess.service.ports.external=9094
+externalAccess.service.domain='ingress-ip'
+```
+
+Note: the deployed ingress must contain the following block:
+
+```yaml
+tcp:
+  9094: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-0-external:9094"
+  9095: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-1-external:9094"
+  9096: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-2-external:9094"
+```
+
+#### Name resolution with External-DNS
+
+You can use the following values to generate External-DNS annotations, which automatically create DNS records for each broker pod:
+
+```yaml
+externalAccess:
+  service:
+    annotations:
+      external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com"
+```
+
+### Sidecars
+
+If you need additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can add them via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
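+
+For instance, a sketch using the presets; the node label key and values are placeholders, not chart defaults:
+
+```yaml
+podAntiAffinityPreset: hard      # never co-schedule two brokers on the same node
+nodeAffinityPreset:
+  type: soft                     # prefer, but do not require, matching nodes
+  key: "kubernetes.io/arch"      # placeholder node label key
+  values:
+    - amd64
+```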
+
+### Deploying extra resources
+
+There are cases where you may want to deploy extra objects, such as Kafka Connect. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example creates a Kafka Connect deployment, along with its ConfigMap and Service, so you can connect Kafka with MongoDB&reg;:
+
+```yaml
+## Extra objects to deploy (value evaluated as a template)
+##
+extraDeploy:
+  - |
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "common.labels.standard" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+    spec:
+      replicas: 1
+      selector:
+        matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+          app.kubernetes.io/component: connector
+      template:
+        metadata:
+          labels: {{- include "common.labels.standard" . | nindent 8 }}
+            app.kubernetes.io/component: connector
+        spec:
+          containers:
+            - name: connect
+              image: KAFKA-CONNECT-IMAGE
+              imagePullPolicy: IfNotPresent
+              ports:
+                - name: connector
+                  containerPort: 8083
+              volumeMounts:
+                - name: configuration
+                  mountPath: /bitnami/kafka/config
+          volumes:
+            - name: configuration
+              configMap:
+                name: {{ include "kafka.fullname" . }}-connect
+  - |
+    apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "common.labels.standard" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+    data:
+      connect-standalone.properties: |-
+        bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }}
+        ...
+      mongodb.properties: |-
+        connection.uri=mongodb://root:password@mongodb-hostname:27017
+        ...
+  - |
+    apiVersion: v1
+    kind: Service
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "common.labels.standard" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+    spec:
+      ports:
+        - protocol: TCP
+          port: 8083
+          targetPort: connector
+      selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+```
+
+You can create the Kafka Connect image using the Dockerfile below:
+
+```Dockerfile
+FROM bitnami/kafka:latest
+# Download MongoDB&reg; Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb
+RUN mkdir -p /opt/bitnami/kafka/plugins && \
+    cd /opt/bitnami/kafka/plugins && \
+    curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar
+CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties
+```
+
+## Persistence
+
+The [Bitnami Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence.
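+
+A minimal values sketch (the size shown is illustrative) to tune or disable persistence:
+
+```yaml
+persistence:
+  enabled: true   # set to false to skip the PVC entirely
+  size: 8Gi       # illustrative size
+```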
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data to it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
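+
+A minimal sketch of the relevant values:
+
+```yaml
+volumePermissions:
+  ## Run an initContainer that adjusts ownership of the data volume before Kafka starts
+  enabled: true
+```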
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 22.0.0
+
+This major updates Kafka's configuration to use Kraft by default. You can learn more about this configuration [here](https://developer.confluent.io/learn/kraft). Apart from setting the `kraft.enabled` parameter to `true`, we also made the following changes (see the sketch after this list):
+
+- Renamed the `minBrokerId` parameter to `minId`; it sets the minimum ID to use when configuring the `node.id` or `broker.id` parameter, depending on Kafka's configuration. This parameter sets the `KAFKA_CFG_NODE_ID` env var in the container.
+- Updated the `containerPorts` and `service.ports` parameters to include the new controller port.
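+
+A minimal values sketch of the new defaults (the top-level placement of `minId` is an assumption; check your chart version's values.yaml):
+
+```yaml
+kraft:
+  ## Kraft mode is the default in 22.x
+  enabled: true
+## Minimum ID used for node.id/broker.id (assumed top-level placement)
+minId: 0
+```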
+
+### To 21.0.0
+
+This major updates Kafka to its newest version, 3.4.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/34/documentation.html#upgrade).
+
+### To 20.0.0
+
+This major updates the Zookeeper subchart to its newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100).
+
+### To 19.0.0
+
+This major updates Kafka to its newest version, 3.3.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/33/documentation.html#upgrade).
+
+### To 18.0.0
+
+This major updates the Zookeeper subchart to its newest major, 10.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1000).
+
+### To 16.0.0
+
+This major updates the Zookeeper subchart to its newest major, 9.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-900).
+
+### To 15.0.0
+
+This major release bumps the Kafka major version to the `3.x` series.
+It also renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. Some affected values are (see the sketch after this list):
+
+- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map.
+- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map.
+- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- Several parameters marked as deprecated in `14.x.x` are no longer supported.
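+
+For instance, the service port regrouping looks roughly like this (the `client` key name follows the 15.x values layout and may differ in your setup):
+
+```yaml
+## 14.x
+# service:
+#   port: 9092
+## 15.x
+service:
+  ports:
+    client: 9092
+```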
+
+Additionally, it updates the ZooKeeper subchart to its newest major, `8.0.0`, which contains similar changes.
+
+### To 14.0.0
+
+In this version, the `image` block is defined once and is used in the different templates, while in the previous version, the `image` block was duplicated for the main container and the provisioning one:
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+```
+
+VS
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+...
+provisioning:
+  image:
+    registry: docker.io
+    repository: bitnami/kafka
+    tag: 2.8.0
+```
+
+See [PR#7114](https://github.com/bitnami/charts/pull/7114) for more info about the implemented changes.
+
+### To 13.0.0
+
+This major updates the Zookeeper subchart to its newest major, 7.0.0, which renames all TLS-related settings. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-700).
+
+### To 12.2.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
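+
+The dependency is declared in *Chart.yaml* with the same coordinates shown in the common chart's own README:
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+Run `helm dependency update` afterwards to refresh *Chart.lock*.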
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required for the Helm Chart to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Move dependency information from the *requirements.yaml* to the *Chart.yaml*
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+### To 11.8.0
+
+External access to brokers can now be achieved through the cluster's Kafka service.
+
+- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external`
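+
+A minimal values sketch (the port numbers are illustrative):
+
+```yaml
+service:
+  nodePorts:
+    client: 30092
+    external: 30094
+```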
+
+### To 11.7.0
+
+The way to configure users and passwords changed. It is now possible to create multiple users during installation by providing lists of users and passwords (see the sketch after this list).
+
+- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array).
+- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array).
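+
+A minimal sketch of the new array format (user names and passwords are illustrative):
+
+```yaml
+auth:
+  jaas:
+    clientUsers:
+      - user1
+      - user2
+    clientPasswords:
+      - password1
+      - password2
+```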
+
+### To 11.0.0
+
+The way to configure listeners and authentication on Kafka was completely refactored, allowing users to configure different authentication protocols on different listeners. Please check the [Listeners Configuration](#listeners-configuration) section for more information.
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or replaced by new ones in this major version (a short sketch of the new auth parameters follows the list):
+
+- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.certificatesSecret` -> renamed to `auth.jksSecret`.
+- `auth.certificatesPassword` -> renamed to `auth.jksPassword`.
+- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`.
+- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser`
+- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword`
+- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser`
+- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword`
+- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret`
+- `service.sslPort` -> deprecated in favor of `service.internalPort`
+- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort`
+- `metrics.kafka.extraFlag` -> new parameter
+- `metrics.kafka.certificatesSecret` -> new parameter
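+
+For instance, a values sketch using the new listener parameters (the protocol values shown are illustrative choices among the supported ones):
+
+```yaml
+auth:
+  clientProtocol: sasl
+  interBrokerProtocol: plaintext
+```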
+
+### To 10.0.0
+
+If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later.
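+
+For example, a sketch assuming the release name is kafka and the default image parameters:
+
+```console
+helm upgrade kafka oci://registry-1.docker.io/bitnamicharts/kafka --set image.tag=2.4.1-debian-10-r38
+```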
+
+### To 9.0.0
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed in this major version:
+
+```diff
+- securityContext.enabled
+- securityContext.fsGroup
+- securityContext.runAsUser
++ podSecurityContext
+- externalAccess.service.loadBalancerIP
++ externalAccess.service.loadBalancerIPs
+- externalAccess.service.nodePort
++ externalAccess.service.nodePorts
+- metrics.jmx.configMap.enabled
+- metrics.jmx.configMap.overrideConfig
++ metrics.jmx.config
+- metrics.jmx.configMap.overrideName
++ metrics.jmx.existingConfigmap
+```
+
+Port names were prefixed with the protocol to comply with Istio (see <https://istio.io/docs/ops/deployment/requirements/>).
+
+### To 8.0.0
+
+There is no backwards compatibility since the brokerID changed to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
+
+### To 7.0.0
+
+Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments.
+Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka:
+
+```console
+helm upgrade kafka oci://registry-1.docker.io/bitnamicharts/kafka --version 6.1.8 --set metrics.kafka.enabled=false
+helm upgrade kafka oci://registry-1.docker.io/bitnamicharts/kafka --version 7.0.0 --set metrics.kafka.enabled=true
+```
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+## License
+
+Copyright &copy; 2023 VMware, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 22 - 0
kafka/helm/kafka/charts/common/.helmignore

@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 23 - 0
kafka/helm/kafka/charts/common/Chart.yaml

@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.4.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://bitnami.com
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: VMware, Inc.
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+type: library
+version: 2.4.0

+ 235 - 0
kafka/helm/kafka/charts/common/README.md

@@ -0,0 +1,235 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "" is used, which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required for the Helm Chart to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright &copy; 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 106 - 0
kafka/helm/kafka/charts/common/templates/_affinities.tpl

@@ -0,0 +1,106 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" . -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - podAffinityTerm:
+      labelSelector:
+        matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+          {{- if not (empty $component) }}
+          {{ printf "app.kubernetes.io/component: %s" $component }}
+          {{- end }}
+          {{- range $key, $value := $extraMatchLabels }}
+          {{ $key }}: {{ $value | quote }}
+          {{- end }}
+      topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  - labelSelector:
+      matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+        {{- if not (empty $component) }}
+        {{ printf "app.kubernetes.io/component: %s" $component }}
+        {{- end }}
+        {{- range $key, $value := $extraMatchLabels }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+    topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.pods.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.pods.hard" . -}}
+  {{- end -}}
+{{- end -}}

+ 180 - 0
kafka/helm/kafka/charts/common/templates/_capabilities.tpl

@@ -0,0 +1,180 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+    {{- if .Values.global.kubeVersion }}
+    {{- .Values.global.kubeVersion -}}
+    {{- else }}
+    {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+    {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for daemonset.
+*/}}
+{{- define "common.capabilities.daemonset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Vertical Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.vpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the Helm version in use was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}

+ 23 - 0
kafka/helm/kafka/charts/common/templates/_errors.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty, it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+    {{- $errorString = print $errorString "\n                 Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+    {{- $errorString = print $errorString "\n%s" -}}
+    {{- printf $errorString $validationErrors | fail -}}
+  {{- end -}}
+{{- end -}}

+ 80 - 0
kafka/helm/kafka/charts/common/templates/_images.tpl

@@ -0,0 +1,80 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+    {{- if .global.imageRegistry }}
+     {{- $registryName = .global.imageRegistry -}}
+    {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+    {{- $separator = "@" -}}
+    {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+    {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+    {{- printf "%s%s%s"  $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+  {{- $pullSecrets := list }}
+
+  {{- if .global }}
+    {{- range .global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets | uniq }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+  {{- $pullSecrets := list }}
+  {{- $context := .context }}
+
+  {{- if $context.Values.global }}
+    {{- range $context.Values.global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets | uniq }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}

+ 68 - 0
kafka/helm/kafka/charts/common/templates/_ingress.tpl

@@ -0,0 +1,68 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+  - serviceName - String. Name of an existing service backend
+  - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending on whether it is a string or an integer.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+  name: {{ .serviceName }}
+  port:
+    {{- if typeIs "string" .servicePort }}
+    name: {{ .servicePort }}
+    {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+    number: {{ .servicePort | int }}
+    {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}

+ 18 - 0
kafka/helm/kafka/charts/common/templates/_labels.tpl

@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}

+ 66 - 0
kafka/helm/kafka/charts/common/templates/_names.tpl

@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}

+ 165 - 0
kafka/helm/kafka/charts/common/templates/_secrets.tpl

@@ -0,0 +1,165 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+  {{- if not (typeIs "string" .existingSecret) -}}
+    {{- if .existingSecret.keyMapping -}}
+      {{- $key = index .existingSecret.keyMapping $.key -}}
+    {{- end -}}
+  {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+  - length - int - Optional - Length of the generated random password.
+  - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+  - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+  - context - Context - Required - Parent context.
+
+The order in which this function returns a secret password:
+  1. Already existing 'Secret' resource
+     (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+  2. Password provided via the values.yaml
+     (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+  3. Randomly generated secret password
+     (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key | quote }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - defaultValue - String - Required - The default value to use when the key is not found in the existing secret.
+  - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+  {{- $value = index $secretData .key -}}
+{{- else -}}
+  {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}

+ 23 - 0
kafka/helm/kafka/charts/common/templates/_storage.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+    {{- if .global.storageClass -}}
+        {{- $storageClass = .global.storageClass -}}
+    {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+      {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+      {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}

+ 13 - 0
kafka/helm/kafka/charts/common/templates/_tplvalues.tpl

@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}

+ 62 - 0
kafka/helm/kafka/charts/common/templates/_utils.tpl

@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+  {{- $fieldNameSplit := splitList "-" .field -}}
+  {{- $upperCaseFieldNameSplit := list -}}
+
+  {{- range $fieldNameSplit -}}
+    {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+  {{- end -}}
+
+  {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+  {{- if not $latestObj -}}
+    {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+  {{- end -}}
+  {{- $value = ( index $latestObj . ) -}}
+  {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}} 
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+  {{- if $value -}}
+    {{- $key = . }}
+  {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}} 
+{{- end -}}

+ 14 - 0
kafka/helm/kafka/charts/common/templates/_warnings.tpl

@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}

+ 72 - 0
kafka/helm/kafka/charts/common/templates/validations/_cassandra.tpl

@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+  {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+  {{- $enabled := include "common.cassandra.values.enabled" . -}}
+  {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+  {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.dbUser.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.cassandra.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+  {{- if .subchart -}}
+    cassandra.dbUser
+  {{- else -}}
+    dbUser
+  {{- end -}}
+{{- end -}}

+ 103 - 0
kafka/helm/kafka/charts/common/templates/validations/_mariadb.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+  {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mariadb.values.enabled" . -}}
+  {{- $architecture := include "common.mariadb.values.architecture" . -}}
+  {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mariadb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mariadb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
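+
+{{/*
+Illustrative values layout (subchart case) showing the keys the helpers above
+resolve; every concrete value here is a placeholder:
+
+mariadb:
+  enabled: true
+  architecture: replication      # adds the mariadb-replication-password requirement
+  auth:
+    existingSecret: ""           # leave empty for the password checks to apply
+    rootPassword: ""             # always required (mariadb-root-password)
+    username: my_user            # a non-empty username also requires...
+    password: ""                 # ...mariadb-password
+    replicationPassword: ""
+*/}}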

+ 108 - 0
kafka/helm/kafka/charts/common/templates/validations/_mongodb.tpl

@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MongoDB&reg; values are stored, e.g: "mongodb-passwords-secret"
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+  {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mongodb.values.enabled" . -}}
+  {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+  {{- $architecture := include "common.mongodb.values.architecture" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+  {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+  {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+    {{- if and $valueUsername $valueDatabase -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replicaset") -}}
+        {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false

+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
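+
+{{/*
+Putting the conditions together: with auth enabled, no existing secret and a
+"replicaset" architecture, the validation requires both a root password and a
+replica set key. An illustrative install for a chart embedding MongoDB&reg; as
+a subchart (release name and environment variables are placeholders):
+
+helm install my-release . \
+  --set mongodb.architecture=replicaset \
+  --set mongodb.auth.rootPassword=$MONGODB_ROOT_PASSWORD \
+  --set mongodb.auth.replicaSetKey=$MONGODB_REPLICA_SET_KEY
+*/}}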

+ 103 - 0
kafka/helm/kafka/charts/common/templates/validations/_mysql.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+  {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mysql.values.enabled" . -}}
+  {{- $architecture := include "common.mysql.values.architecture" . -}}
+  {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mysql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+  {{- if .subchart -}}
+    mysql.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
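+
+{{/*
+Subtlety: without "subchart", common.mysql.values.enabled returns
+(not .Values.enabled), so the validation stays active whenever no top-level
+"enabled" flag exists - presumably the standalone-chart case. A standalone
+invocation (secret name illustrative) simply omits the subchart key:
+
+{{ include "common.validations.values.mysql.passwords" (dict "secret" (printf "%s-mysql" .Release.Name) "context" $) }}
+*/}}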

+ 129 - 0
kafka/helm/kafka/charts/common/templates/validations/_postgresql.tpl

@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationPassword := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+        {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationPassword "secret" .secret "field" "postgresql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}
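+
+{{/*
+Unlike the other helpers in this directory, the PostgreSQL ones consult
+global.postgresql.* first. Illustrative precedence (names are placeholders):
+
+global:
+  postgresql:
+    existingSecret: shared-pg-secret   # wins over the chart-level value below
+postgresql:
+  enabled: true
+  existingSecret: ""                   # only consulted when the global key is unset
+*/}}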

+ 76 - 0
kafka/helm/kafka/charts/common/templates/validations/_redis.tpl

@@ -0,0 +1,76 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the Redis&reg; chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
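+
+{{/*
+The standarized.version probe above merely checks for an "auth" map under the
+Redis&reg; values. The two layouts it distinguishes look roughly like this
+(keys only; values are placeholders):
+
+redis:            # chart >= 14: keys resolve as redis.auth.*
+  auth:
+    enabled: true
+    password: ""
+    existingSecret: ""
+
+redis:            # older charts: legacy keys redis.usePassword / redis.password
+  usePassword: true
+  password: ""
+  existingSecret: ""
+*/}}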

+ 46 - 0
kafka/helm/kafka/charts/common/templates/validations/_validations.tpl

@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
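+
+{{/*
+From the printf above, a failed check for e.g. the MariaDB root password renders
+along these lines (the variable name comes from common.utils.fieldToEnvVar,
+assumed here to upper-case the secret field):
+
+    'auth.rootPassword' must not be empty, please add '--set mariadb.auth.rootPassword=$MARIADB_ROOT_PASSWORD' to the command.
+*/}}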

+ 5 - 0
kafka/helm/kafka/charts/common/values.yaml

@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart

+ 21 - 0
kafka/helm/kafka/charts/zookeeper/.helmignore

@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj

+ 6 - 0
kafka/helm/kafka/charts/zookeeper/Chart.lock

@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 2.4.0
+digest: sha256:8c1a5dc923412d11d4d841420494b499cb707305c8b9f87f45ea1a8bf3172cb3
+generated: "2023-05-21T17:05:21.743633346Z"

+ 24 - 0
kafka/helm/kafka/charts/zookeeper/Chart.yaml

@@ -0,0 +1,24 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 3.8.1
+dependencies:
+- name: common
+  repository: oci://registry-1.docker.io/bitnamicharts
+  tags:
+  - bitnami-common
+  version: 2.x.x
+description: Apache ZooKeeper provides a reliable, centralized register of configuration
+  data and services for distributed applications.
+home: https://bitnami.com
+icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png
+keywords:
+- zookeeper
+maintainers:
+- name: VMware, Inc.
+  url: https://github.com/bitnami/charts
+name: zookeeper
+sources:
+- https://github.com/bitnami/charts/tree/main/bitnami/zookeeper
+version: 11.4.2

+ 522 - 0
kafka/helm/kafka/charts/zookeeper/README.md

@@ -0,0 +1,522 @@
+<!--- app-name: Apache ZooKeeper -->
+
+# Apache ZooKeeper packaged by Bitnami
+
+Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications.
+
+[Overview of Apache ZooKeeper](https://zookeeper.apache.org)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/zookeeper
+```
+
+## Introduction
+
+This chart bootstraps a [ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/zookeeper
+```
+
+These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+### Global parameters
+
+| Name                      | Description                                     | Value |
+| ------------------------- | ----------------------------------------------- | ----- |
+| `global.imageRegistry`    | Global Docker image registry                    | `""`  |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]`  |
+| `global.storageClass`     | Global StorageClass for Persistent Volume(s)    | `""`  |
+
+### Common parameters
+
+| Name                     | Description                                                                                  | Value           |
+| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion`            | Override Kubernetes version                                                                  | `""`            |
+| `nameOverride`           | String to partially override common.names.fullname template (will maintain the release name) | `""`            |
+| `fullnameOverride`       | String to fully override common.names.fullname template                                      | `""`            |
+| `clusterDomain`          | Kubernetes Cluster Domain                                                                    | `cluster.local` |
+| `extraDeploy`            | Extra objects to deploy (evaluated as a template)                                            | `[]`            |
+| `commonLabels`           | Add labels to all the deployed resources                                                     | `{}`            |
+| `commonAnnotations`      | Add annotations to all the deployed resources                                                | `{}`            |
+| `namespaceOverride`      | Override namespace for ZooKeeper resources                                                   | `""`            |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden)      | `false`         |
+| `diagnosticMode.command` | Command to override all containers in the statefulset                                        | `["sleep"]`     |
+| `diagnosticMode.args`    | Args to override all containers in the statefulset                                           | `["infinity"]`  |
+
+### ZooKeeper chart parameters
+
+| Name                          | Description                                                                                                                | Value                   |
+| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `image.registry`              | ZooKeeper image registry                                                                                                   | `docker.io`             |
+| `image.repository`            | ZooKeeper image repository                                                                                                 | `bitnami/zookeeper`     |
+| `image.tag`                   | ZooKeeper image tag (immutable tags are recommended)                                                                       | `3.8.1-debian-11-r36`   |
+| `image.digest`                | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag                  | `""`                    |
+| `image.pullPolicy`            | ZooKeeper image pull policy                                                                                                | `IfNotPresent`          |
+| `image.pullSecrets`           | Specify docker-registry secret names as an array                                                                           | `[]`                    |
+| `image.debug`                 | Specify if debug values should be set                                                                                      | `false`                 |
+| `auth.client.enabled`         | Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5 (see the example after this table)                 | `false`                 |
+| `auth.client.clientUser`      | User that ZooKeeper clients will use to authenticate                                                                       | `""`                    |
+| `auth.client.clientPassword`  | Password that ZooKeeper clients will use to authenticate                                                                   | `""`                    |
+| `auth.client.serverUsers`     | Comma, semicolon or whitespace separated list of users to be created                                                       | `""`                    |
+| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created                                 | `""`                    |
+| `auth.client.existingSecret`  | Use existing secret (ignores previous passwords)                                                                           | `""`                    |
+| `auth.quorum.enabled`         | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5                                                     | `false`                 |
+| `auth.quorum.learnerUser`     | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers.                                           | `""`                    |
+| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers.                                       | `""`                    |
+| `auth.quorum.serverUsers`     | Comma, semicolon or whitespace separated list of users for the quorumServers.                                              | `""`                    |
+| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created                                 | `""`                    |
+| `auth.quorum.existingSecret`  | Use existing secret (ignores previous passwords)                                                                           | `""`                    |
+| `tickTime`                    | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats                                                         | `2000`                  |
+| `initLimit`                   | Amount of time, in ticks, to allow ZooKeeper servers in quorum to connect and sync to a leader                             | `10`                    |
+| `syncLimit`                   | How far out of date a server can be from a leader                                                                          | `5`                     |
+| `preAllocSize`                | Block size for transaction log file                                                                                        | `65536`                 |
+| `snapCount`                   | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000`                |
+| `maxClientCnxns`              | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble     | `60`                    |
+| `maxSessionTimeout`           | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate                               | `40000`                 |
+| `heapSize`                    | Size (in MB) for the Java Heap options (Xmx and Xms)                                                                       | `1024`                  |
+| `fourlwCommandsWhitelist`     | A list of comma separated Four Letter Words commands that can be executed                                                  | `srvr, mntr, ruok`      |
+| `minServerId`                 | Minimal SERVER_ID value; nodes increment their IDs from it                                                                 | `1`                     |
+| `listenOnAllIPs`              | Allow ZooKeeper to listen for connections from its peers on all available IP addresses                                     | `false`                 |
+| `autopurge.snapRetainCount`   | The most recent snapshots amount (and corresponding transaction logs) to retain                                            | `3`                     |
+| `autopurge.purgeInterval`     | The time interval (in hours) for which the purge task has to be triggered                                                  | `0`                     |
+| `logLevel`                    | Log level for the ZooKeeper server. ERROR by default                                                                       | `ERROR`                 |
+| `jvmFlags`                    | Default JVM flags for the ZooKeeper process                                                                                | `""`                    |
+| `dataLogDir`                  | Dedicated data log directory                                                                                               | `""`                    |
+| `configuration`               | Configure ZooKeeper with a custom zoo.cfg file                                                                             | `""`                    |
+| `existingConfigmap`           | The name of an existing ConfigMap with your custom configuration for ZooKeeper                                             | `""`                    |
+| `extraEnvVars`                | Array with extra environment variables to add to ZooKeeper nodes                                                           | `[]`                    |
+| `extraEnvVarsCM`              | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes                                                   | `""`                    |
+| `extraEnvVarsSecret`          | Name of existing Secret containing extra env vars for ZooKeeper nodes                                                      | `""`                    |
+| `command`                     | Override default container command (useful when using custom images)                                                       | `["/scripts/setup.sh"]` |
+| `args`                        | Override default container args (useful when using custom images)                                                          | `[]`                    |
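+
+For example, the `auth.client.*` parameters above can be combined to enable SASL/Digest-MD5 client-server authentication at install time (the credentials below are placeholders):
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/zookeeper \
+  --set auth.client.enabled=true \
+  --set auth.client.clientUser=zkuser \
+  --set auth.client.clientPassword=zkpassword \
+  --set auth.client.serverUsers=zkuser \
+  --set auth.client.serverPasswords=zkpassword
+```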
+
+### Statefulset parameters
+
+| Name                                                | Description                                                                                                                                                                                       | Value           |
+| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `replicaCount`                                      | Number of ZooKeeper nodes                                                                                                                                                                         | `1`             |
+| `containerPorts.client`                             | ZooKeeper client container port                                                                                                                                                                   | `2181`          |
+| `containerPorts.tls`                                | ZooKeeper TLS container port                                                                                                                                                                      | `3181`          |
+| `containerPorts.follower`                           | ZooKeeper follower container port                                                                                                                                                                 | `2888`          |
+| `containerPorts.election`                           | ZooKeeper election container port                                                                                                                                                                 | `3888`          |
+| `livenessProbe.enabled`                             | Enable livenessProbe on ZooKeeper containers                                                                                                                                                      | `true`          |
+| `livenessProbe.initialDelaySeconds`                 | Initial delay seconds for livenessProbe                                                                                                                                                           | `30`            |
+| `livenessProbe.periodSeconds`                       | Period seconds for livenessProbe                                                                                                                                                                  | `10`            |
+| `livenessProbe.timeoutSeconds`                      | Timeout seconds for livenessProbe                                                                                                                                                                 | `5`             |
+| `livenessProbe.failureThreshold`                    | Failure threshold for livenessProbe                                                                                                                                                               | `6`             |
+| `livenessProbe.successThreshold`                    | Success threshold for livenessProbe                                                                                                                                                               | `1`             |
+| `livenessProbe.probeCommandTimeout`                 | Probe command timeout for livenessProbe                                                                                                                                                           | `2`             |
+| `readinessProbe.enabled`                            | Enable readinessProbe on ZooKeeper containers                                                                                                                                                     | `true`          |
+| `readinessProbe.initialDelaySeconds`                | Initial delay seconds for readinessProbe                                                                                                                                                          | `5`             |
+| `readinessProbe.periodSeconds`                      | Period seconds for readinessProbe                                                                                                                                                                 | `10`            |
+| `readinessProbe.timeoutSeconds`                     | Timeout seconds for readinessProbe                                                                                                                                                                | `5`             |
+| `readinessProbe.failureThreshold`                   | Failure threshold for readinessProbe                                                                                                                                                              | `6`             |
+| `readinessProbe.successThreshold`                   | Success threshold for readinessProbe                                                                                                                                                              | `1`             |
+| `readinessProbe.probeCommandTimeout`                | Probe command timeout for readinessProbe                                                                                                                                                          | `2`             |
+| `startupProbe.enabled`                              | Enable startupProbe on ZooKeeper containers                                                                                                                                                       | `false`         |
+| `startupProbe.initialDelaySeconds`                  | Initial delay seconds for startupProbe                                                                                                                                                            | `30`            |
+| `startupProbe.periodSeconds`                        | Period seconds for startupProbe                                                                                                                                                                   | `10`            |
+| `startupProbe.timeoutSeconds`                       | Timeout seconds for startupProbe                                                                                                                                                                  | `1`             |
+| `startupProbe.failureThreshold`                     | Failure threshold for startupProbe                                                                                                                                                                | `15`            |
+| `startupProbe.successThreshold`                     | Success threshold for startupProbe                                                                                                                                                                | `1`             |
+| `customLivenessProbe`                               | Custom livenessProbe that overrides the default one                                                                                                                                               | `{}`            |
+| `customReadinessProbe`                              | Custom readinessProbe that overrides the default one                                                                                                                                              | `{}`            |
+| `customStartupProbe`                                | Custom startupProbe that overrides the default one                                                                                                                                                | `{}`            |
+| `lifecycleHooks`                                    | for the ZooKeeper container(s) to automate configuration before or after startup                                                                                                                  | `{}`            |
+| `resources.limits`                                  | The resources limits for the ZooKeeper containers                                                                                                                                                 | `{}`            |
+| `resources.requests.memory`                         | The requested memory for the ZooKeeper containers                                                                                                                                                 | `256Mi`         |
+| `resources.requests.cpu`                            | The requested cpu for the ZooKeeper containers                                                                                                                                                    | `250m`          |
+| `podSecurityContext.enabled`                        | Enabled ZooKeeper pods' Security Context                                                                                                                                                          | `true`          |
+| `podSecurityContext.fsGroup`                        | Set ZooKeeper pod's Security Context fsGroup                                                                                                                                                      | `1001`          |
+| `containerSecurityContext.enabled`                  | Enabled ZooKeeper containers' Security Context                                                                                                                                                    | `true`          |
+| `containerSecurityContext.runAsUser`                | Set ZooKeeper containers' Security Context runAsUser                                                                                                                                              | `1001`          |
+| `containerSecurityContext.runAsNonRoot`             | Set ZooKeeper containers' Security Context runAsNonRoot                                                                                                                                           | `true`          |
+| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as nonprivilege                                                                                                                                                 | `false`         |
+| `hostAliases`                                       | ZooKeeper pods host aliases                                                                                                                                                                       | `[]`            |
+| `podLabels`                                         | Extra labels for ZooKeeper pods                                                                                                                                                                   | `{}`            |
+| `podAnnotations`                                    | Annotations for ZooKeeper pods                                                                                                                                                                    | `{}`            |
+| `podAffinityPreset`                                 | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                               | `""`            |
+| `podAntiAffinityPreset`                             | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                          | `soft`          |
+| `nodeAffinityPreset.type`                           | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                         | `""`            |
+| `nodeAffinityPreset.key`                            | Node label key to match. Ignored if `affinity` is set.                                                                                                                                            | `""`            |
+| `nodeAffinityPreset.values`                         | Node label values to match. Ignored if `affinity` is set.                                                                                                                                         | `[]`            |
+| `affinity`                                          | Affinity for pod assignment                                                                                                                                                                       | `{}`            |
+| `nodeSelector`                                      | Node labels for pod assignment                                                                                                                                                                    | `{}`            |
+| `tolerations`                                       | Tolerations for pod assignment                                                                                                                                                                    | `[]`            |
+| `topologySpreadConstraints`                         | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template                                                                          | `[]`            |
+| `podManagementPolicy`                               | Pod management policy; lets the StatefulSet controller relax its ordering guarantees while preserving its uniqueness and identity guarantees. Valid policies: `OrderedReady` and `Parallel`       | `Parallel`      |
+| `priorityClassName`                                 | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand                                                                                   | `""`            |
+| `schedulerName`                                     | Name of the Kubernetes scheduler to use (other than the default scheduler)                                                                                                                        | `""`            |
+| `updateStrategy.type`                               | ZooKeeper statefulset strategy type                                                                                                                                                               | `RollingUpdate` |
+| `updateStrategy.rollingUpdate`                      | ZooKeeper statefulset rolling update configuration parameters                                                                                                                                     | `{}`            |
+| `extraVolumes`                                      | Optionally specify extra list of additional volumes for the ZooKeeper pod(s)                                                                                                                      | `[]`            |
+| `extraVolumeMounts`                                 | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s)                                                                                                           | `[]`            |
+| `sidecars`                                          | Add additional sidecar containers to the ZooKeeper pod(s)                                                                                                                                         | `[]`            |
+| `initContainers`                                    | Add additional init containers to the ZooKeeper pod(s)                                                                                                                                            | `[]`            |
+| `pdb.create`                                        | Deploy a pdb object for the ZooKeeper pod                                                                                                                                                         | `false`         |
+| `pdb.minAvailable`                                  | Minimum available ZooKeeper replicas                                                                                                                                                              | `""`            |
+| `pdb.maxUnavailable`                                | Maximum unavailable ZooKeeper replicas                                                                                                                                                            | `1`             |
+
+### Traffic Exposure parameters
+
+| Name                                        | Description                                                                             | Value       |
+| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- |
+| `service.type`                              | Kubernetes Service type                                                                 | `ClusterIP` |
+| `service.ports.client`                      | ZooKeeper client service port                                                           | `2181`      |
+| `service.ports.tls`                         | ZooKeeper TLS service port                                                              | `3181`      |
+| `service.ports.follower`                    | ZooKeeper follower service port                                                         | `2888`      |
+| `service.ports.election`                    | ZooKeeper election service port                                                         | `3888`      |
+| `service.nodePorts.client`                  | Node port for clients                                                                   | `""`        |
+| `service.nodePorts.tls`                     | Node port for TLS                                                                       | `""`        |
+| `service.disableBaseClientPort`             | Remove client port from service definitions.                                            | `false`     |
+| `service.sessionAffinity`                   | Control where client requests go, to the same pod or round-robin                        | `None`      |
+| `service.sessionAffinityConfig`             | Additional settings for the sessionAffinity                                             | `{}`        |
+| `service.clusterIP`                         | ZooKeeper service Cluster IP                                                            | `""`        |
+| `service.loadBalancerIP`                    | ZooKeeper service Load Balancer IP                                                      | `""`        |
+| `service.loadBalancerSourceRanges`          | ZooKeeper service Load Balancer sources                                                 | `[]`        |
+| `service.externalTrafficPolicy`             | ZooKeeper service external traffic policy                                               | `Cluster`   |
+| `service.annotations`                       | Additional custom annotations for ZooKeeper service                                     | `{}`        |
+| `service.extraPorts`                        | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]`        |
+| `service.headless.annotations`              | Annotations for the Headless Service                                                    | `{}`        |
+| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods         | `true`      |
+| `service.headless.servicenameOverride`      | String to partially override headless service name                                      | `""`        |
+| `networkPolicy.enabled`                     | Specifies whether a NetworkPolicy should be created                                     | `false`     |
+| `networkPolicy.allowExternal`               | Don't require client label for connections                                              | `true`      |
+
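+For example, to expose ZooKeeper outside the cluster through a LoadBalancer restricted to a known network, you can combine the parameters above in a values file. A minimal sketch (the CIDR is illustrative):
+
+```yaml
+service:
+  type: LoadBalancer
+  loadBalancerSourceRanges:
+    - 10.10.10.0/24   # illustrative CIDR, replace with your own
+```
+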
+### Other Parameters
+
+| Name                                          | Description                                                            | Value   |
+| --------------------------------------------- | ---------------------------------------------------------------------- | ------- |
+| `serviceAccount.create`                       | Enable creation of ServiceAccount for ZooKeeper pod                    | `false` |
+| `serviceAccount.name`                         | The name of the ServiceAccount to use.                                 | `""`    |
+| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true`  |
+| `serviceAccount.annotations`                  | Additional custom annotations for the ServiceAccount                   | `{}`    |
+
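+For example, to create a dedicated ServiceAccount for the ZooKeeper pods (the name is illustrative; leave it empty to use the generated one):
+
+```yaml
+serviceAccount:
+  create: true
+  name: zookeeper-sa   # illustrative name
+```
+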
+### Persistence parameters
+
+| Name                                   | Description                                                                    | Value               |
+| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- |
+| `persistence.enabled`                  | Enable ZooKeeper data persistence using PVC. If false, use emptyDir            | `true`              |
+| `persistence.existingClaim`            | Name of an existing PVC to use (only when deploying a single replica)          | `""`                |
+| `persistence.storageClass`             | PVC Storage Class for ZooKeeper data volume                                    | `""`                |
+| `persistence.accessModes`              | PVC Access modes                                                               | `["ReadWriteOnce"]` |
+| `persistence.size`                     | PVC Storage Request for ZooKeeper data volume                                  | `8Gi`               |
+| `persistence.annotations`              | Annotations for the PVC                                                        | `{}`                |
+| `persistence.labels`                   | Labels for the PVC                                                             | `{}`                |
+| `persistence.selector`                 | Selector to match an existing Persistent Volume for ZooKeeper's data PVC       | `{}`                |
+| `persistence.dataLogDir.size`          | PVC Storage Request for ZooKeeper's dedicated data log directory               | `8Gi`               |
+| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""`                |
+| `persistence.dataLogDir.selector`      | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC   | `{}`                |
+
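+As an illustration, the following values sketch requests a larger data volume from a specific storage class (the class name is a placeholder):
+
+```yaml
+persistence:
+  enabled: true
+  storageClass: "standard"   # placeholder, use a class available in your cluster
+  size: 20Gi
+```
+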
+### Volume Permissions parameters
+
+| Name                                                   | Description                                                                                                                       | Value                   |
+| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled`                            | Enable init container that changes the owner and group of the persistent volume                                                   | `false`                 |
+| `volumePermissions.image.registry`                     | Init container volume-permissions image registry                                                                                  | `docker.io`             |
+| `volumePermissions.image.repository`                   | Init container volume-permissions image repository                                                                                | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag`                          | Init container volume-permissions image tag (immutable tags are recommended)                                                      | `11-debian-11-r118`     |
+| `volumePermissions.image.digest`                       | Init container volume-permissions image digest in the format sha256:aa.... Note that this parameter, if set, overrides the tag    | `""`                    |
+| `volumePermissions.image.pullPolicy`                   | Init container volume-permissions image pull policy                                                                               | `IfNotPresent`          |
+| `volumePermissions.image.pullSecrets`                  | Init container volume-permissions image pull secrets                                                                              | `[]`                    |
+| `volumePermissions.resources.limits`                   | Init container volume-permissions resource limits                                                                                 | `{}`                    |
+| `volumePermissions.resources.requests`                 | Init container volume-permissions resource requests                                                                               | `{}`                    |
+| `volumePermissions.containerSecurityContext.enabled`   | Enable the init container's Security Context                                                                                      | `true`                  |
+| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container                                                                                                    | `0`                     |
+
+### Metrics parameters
+
+| Name                                       | Description                                                                           | Value       |
+| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- |
+| `metrics.enabled`                          | Enable Prometheus to access ZooKeeper metrics endpoint                                | `false`     |
+| `metrics.containerPort`                    | ZooKeeper Prometheus Exporter container port                                          | `9141`      |
+| `metrics.service.type`                     | ZooKeeper Prometheus Exporter service type                                            | `ClusterIP` |
+| `metrics.service.port`                     | ZooKeeper Prometheus Exporter service port                                            | `9141`      |
+| `metrics.service.annotations`              | Annotations for Prometheus to auto-discover the metrics endpoint                      | `{}`        |
+| `metrics.serviceMonitor.enabled`           | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator         | `false`     |
+| `metrics.serviceMonitor.namespace`         | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)         | `""`        |
+| `metrics.serviceMonitor.interval`          | Interval at which metrics should be scraped.                                          | `""`        |
+| `metrics.serviceMonitor.scrapeTimeout`     | Timeout after which the scrape is ended                                               | `""`        |
+| `metrics.serviceMonitor.additionalLabels`  | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}`        |
+| `metrics.serviceMonitor.selector`          | Prometheus instance selector labels                                                   | `{}`        |
+| `metrics.serviceMonitor.relabelings`       | RelabelConfigs to apply to samples before scraping                                    | `[]`        |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion                             | `[]`        |
+| `metrics.serviceMonitor.honorLabels`       | Specify honorLabels parameter to add the scrape endpoint                              | `false`     |
+| `metrics.serviceMonitor.jobLabel`          | The name of the label on the target service to use as the job name in Prometheus.     | `""`        |
+| `metrics.prometheusRule.enabled`           | Create a PrometheusRule for Prometheus Operator                                       | `false`     |
+| `metrics.prometheusRule.namespace`         | Namespace for the PrometheusRule Resource (defaults to the Release Namespace)         | `""`        |
+| `metrics.prometheusRule.additionalLabels`  | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}`        |
+| `metrics.prometheusRule.rules`             | PrometheusRule definitions                                                            | `[]`        |
+
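+For instance, to expose the metrics endpoint and scrape it with Prometheus Operator, a minimal values sketch could be (the label is illustrative and must match your Prometheus instance's selector):
+
+```yaml
+metrics:
+  enabled: true
+  serviceMonitor:
+    enabled: true
+    interval: 30s
+    additionalLabels:
+      release: prometheus   # illustrative label
+```
+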
+### TLS/SSL parameters
+
+| Name                                      | Description                                                                                        | Value                                                                 |
+| ----------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- |
+| `tls.client.enabled`                      | Enable TLS for client connections                                                                  | `false`                                                               |
+| `tls.client.auth`                         | SSL Client auth. Can be "none", "want" or "need".                                                  | `none`                                                                |
+| `tls.client.autoGenerated`                | Generate automatically self-signed TLS certificates for ZooKeeper client communications            | `false`                                                               |
+| `tls.client.existingSecret`               | Name of the existing secret containing the TLS certificates for ZooKeeper client communications    | `""`                                                                  |
+| `tls.client.existingSecretKeystoreKey`    | The secret key from the tls.client.existingSecret containing the Keystore.                         | `""`                                                                  |
+| `tls.client.existingSecretTruststoreKey`  | The secret key from the tls.client.existingSecret containing the Truststore.                       | `""`                                                                  |
+| `tls.client.keystorePath`                 | Location of the KeyStore file used for Client connections                                          | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks`   |
+| `tls.client.truststorePath`               | Location of the TrustStore file used for Client connections                                        | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` |
+| `tls.client.passwordsSecretName`          | Existing secret containing Keystore and truststore passwords                                       | `""`                                                                  |
+| `tls.client.passwordsSecretKeystoreKey`   | The secret key from the tls.client.passwordsSecretName containing the password for the Keystore.   | `""`                                                                  |
+| `tls.client.passwordsSecretTruststoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. | `""`                                                                  |
+| `tls.client.keystorePassword`             | Password to access KeyStore if needed                                                              | `""`                                                                  |
+| `tls.client.truststorePassword`           | Password to access TrustStore if needed                                                            | `""`                                                                  |
+| `tls.quorum.enabled`                      | Enable TLS for quorum protocol                                                                     | `false`                                                               |
+| `tls.quorum.auth`                         | SSL Quorum Client auth. Can be "none", "want" or "need".                                           | `none`                                                                |
+| `tls.quorum.autoGenerated`                | Create self-signed TLS certificates. Currently only supports PEM certificates.                     | `false`                                                               |
+| `tls.quorum.existingSecret`               | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol          | `""`                                                                  |
+| `tls.quorum.existingSecretKeystoreKey`    | The secret key from the tls.quorum.existingSecret containing the Keystore.                         | `""`                                                                  |
+| `tls.quorum.existingSecretTruststoreKey`  | The secret key from the tls.quorum.existingSecret containing the Truststore.                       | `""`                                                                  |
+| `tls.quorum.keystorePath`                 | Location of the KeyStore file used for Quorum protocol                                             | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks`   |
+| `tls.quorum.truststorePath`               | Location of the TrustStore file used for Quorum protocol                                           | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` |
+| `tls.quorum.passwordsSecretName`          | Existing secret containing Keystore and truststore passwords                                       | `""`                                                                  |
+| `tls.quorum.passwordsSecretKeystoreKey`   | The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore.   | `""`                                                                  |
+| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""`                                                                  |
+| `tls.quorum.keystorePassword`             | Password to access KeyStore if needed                                                              | `""`                                                                  |
+| `tls.quorum.truststorePassword`           | Password to access TrustStore if needed                                                            | `""`                                                                  |
+| `tls.resources.limits`                    | The resources limits for the TLS init container                                                    | `{}`                                                                  |
+| `tls.resources.requests`                  | The requested resources for the TLS init container                                                 | `{}`                                                                  |
+
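+For example, to enable TLS for client connections using certificates and store passwords kept in existing secrets, a values sketch could be (secret names are placeholders):
+
+```yaml
+tls:
+  client:
+    enabled: true
+    auth: need
+    existingSecret: zookeeper-client-tls         # placeholder secret holding the JKS stores
+    passwordsSecretName: zookeeper-tls-passwords # placeholder secret holding the store passwords
+```
+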
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install my-release \
+  --set auth.clientUser=newUser \
+    oci://registry-1.docker.io/bitnamicharts/zookeeper
+```
+
+The above command sets the ZooKeeper user to `newUser`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/zookeeper
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart that updates its containers whenever a new version of the main container is available, or when significant changes or critical vulnerabilities arise.
+
+### Configure log level
+
+You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs.
+
+To remove that log noise so the level can be set to `INFO`, two changes must be made.
+
+First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`.
+
+Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes:
+
+```yaml
+livenessProbe:
+  enabled: false
+readinessProbe:
+  enabled: false
+customLivenessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+customReadinessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+```
+
+You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`. This property is set in `conf/log4j.properties` as `zookeeper.root.logger`, and defaults to
+
+```console
+zookeeper.root.logger=INFO, CONSOLE
+```
+
+The available appenders are:
+
+- CONSOLE
+- ROLLINGFILE
+- RFAAUDIT
+- TRACEFILE
+
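+As a sketch, assuming the chart exposes an `extraEnvVars` parameter, you could select the `INFO` level with both the console and rolling-file appenders:
+
+```yaml
+extraEnvVars:                 # assumes the chart supports extra environment variables
+  - name: ZOO_LOG4J_PROP
+    value: "INFO, CONSOLE, ROLLINGFILE"
+```
+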
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data to it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
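+For example:
+
+```console
+helm install my-release \
+  --set volumePermissions.enabled=true \
+    oci://registry-1.docker.io/bitnamicharts/zookeeper
+```
+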
+### Configure the data log directory
+
+You can use a dedicated device for logs (instead of the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter to the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (ZooKeeper's default behavior).
+
+When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
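+A minimal sketch (the path is illustrative):
+
+```yaml
+dataLogDir: /bitnami/zookeeper/dataLog   # illustrative path for transaction logs
+persistence:
+  enabled: true
+  dataLogDir:
+    size: 10Gi
+```
+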
+### Set pod affinity
+
+This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
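+For example, to force replicas onto different nodes using the preset values:
+
+```yaml
+podAntiAffinityPreset: hard
+```
+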
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 11.0.0
+
+This major version removes `commonAnnotations` and `commonLabels` from `volumeClaimTemplates`. Annotations and labels can now be set on volume claims using the `persistence.annotations` and `persistence.labels` values. If the previous deployment already set `commonAnnotations` and/or `commonLabels`, then to ensure a clean upgrade from the previous version without losing data, please set `persistence.annotations` and/or `persistence.labels` to the same content as those common values.
+
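+For instance, if the previous release set `commonAnnotations`, mirror the same content in the persistence values before upgrading (the annotation is illustrative):
+
+```yaml
+commonAnnotations:
+  myAnnotation: myValue   # illustrative, previously propagated to volumeClaimTemplates
+persistence:
+  annotations:
+    myAnnotation: myValue # mirror the same content so the volume claims keep their metadata
+```
+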
+### To 10.0.0
+
+This new version of the chart adds support for server-server authentication.
+The chart previously supported only client-server authentication; to avoid confusion, those parameters have been renamed from `auth.*` to `auth.client.*`.
+
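+As an illustration of the rename (the key shown is an example):
+
+```yaml
+# Before (chart < 10.0.0)
+auth:
+  clientUser: myUser
+
+# After (chart >= 10.0.0)
+auth:
+  client:
+    clientUser: myUser
+```
+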
+### To 9.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.8.0. Upgrade compatibility is not guaranteed.
+
+### To 8.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+
+- `allowAnonymousLogin` is deprecated.
+- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map.
+- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- `podDisruptionBudget.*` parameters are renamed to `pdb.*`.
+
+### To 7.0.0
+
+This new version renames the parameters used to configure TLS for both client and quorum.
+
+- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort`
+- `service.tls.client_port` is renamed to `service.tlsClientPort`
+- `service.tls.client_enable` is renamed to `tls.client.enabled`
+- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath`
+- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath`
+- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword`
+- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword`
+- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled`
+- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath`
+- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath`
+- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword`
+- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword`
+
+### To 6.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
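+For example:
+
+```console
+helm dependency update
+```
+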
+### To 6.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required for the Helm Chart to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/).
+
+### To 5.21.0
+
+A couple of parameters related to ZooKeeper metrics were renamed or removed in favor of new ones:
+
+- `metrics.port` is renamed to `metrics.containerPort`.
+- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
+
+### To 3.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
+of the application, each node will need to have at least one snapshot file created in its data directory. If not, the
+new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
+for ways to work around this issue in case you are facing it.
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+## License
+
+Copyright &copy; 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 22 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/.helmignore

@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 23 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/Chart.yaml

@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 2.4.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://bitnami.com
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: VMware, Inc.
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+type: library
+version: 2.4.0

+ 235 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/README.md

@@ -0,0 +1,235 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
+
+Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: oci://registry-1.docker.io/bitnamicharts
+```
+
+```console
+helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
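+A chart template typically consumes this structure through the library's `common.images.image` helper, e.g.:
+
+```yaml
+image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+```
+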
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, we sometimes want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required for the Helm Chart to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+#### What changes were introduced in this major version?
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
+- <https://helm.sh/docs/topics/v2_v3_migration/>
+- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
+
+## License
+
+Copyright &copy; 2023 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 106 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl

@@ -0,0 +1,106 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" . -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Return a topologyKey definition
+{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
+*/}}
+{{- define "common.affinities.topologyKey" -}}
+{{ .topologyKey | default "kubernetes.io/hostname" -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - podAffinityTerm:
+      labelSelector:
+        matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+          {{- if not (empty $component) }}
+          {{ printf "app.kubernetes.io/component: %s" $component }}
+          {{- end }}
+          {{- range $key, $value := $extraMatchLabels }}
+          {{ $key }}: {{ $value | quote }}
+          {{- end }}
+      topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  - labelSelector:
+      matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+        {{- if not (empty $component) }}
+        {{ printf "app.kubernetes.io/component: %s" $component }}
+        {{- end }}
+        {{- range $key, $value := $extraMatchLabels }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+    topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.pods.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.pods.hard" . -}}
+  {{- end -}}
+{{- end -}}

+ 180 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl

@@ -0,0 +1,180 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+    {{- if .Values.global.kubeVersion }}
+    {{- .Values.global.kubeVersion -}}
+    {{- else }}
+    {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+    {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob.
+*/}}
+{{- define "common.capabilities.cronjob.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "batch/v1beta1" -}}
+{{- else -}}
+{{- print "batch/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for daemonset.
+*/}}
+{{- define "common.capabilities.daemonset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if .Values.ingress -}}
+{{- if .Values.ingress.apiVersion -}}
+{{- .Values.ingress.apiVersion -}}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Vertical Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.vpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the Helm version in use was not introduced until version 3.3.0, with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is implemented as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}

+ 23 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_errors.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
+    {{- $errorString = print $errorString "\n                 Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
+    {{- $errorString = print $errorString "\n%s" -}}
+    {{- printf $errorString $validationErrors | fail -}}
+  {{- end -}}
+{{- end -}}

+ 80 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_images.tpl

@@ -0,0 +1,80 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper image name
+{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }}
+*/}}
+{{- define "common.images.image" -}}
+{{- $registryName := .imageRoot.registry -}}
+{{- $repositoryName := .imageRoot.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .imageRoot.tag | toString -}}
+{{- if .global }}
+    {{- if .global.imageRegistry }}
+     {{- $registryName = .global.imageRegistry -}}
+    {{- end -}}
+{{- end -}}
+{{- if .imageRoot.digest }}
+    {{- $separator = "@" -}}
+    {{- $termination = .imageRoot.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+    {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else -}}
+    {{- printf "%s%s%s"  $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
+{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
+*/}}
+{{- define "common.images.pullSecrets" -}}
+  {{- $pullSecrets := list }}
+
+  {{- if .global }}
+    {{- range .global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets . -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets | uniq }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names evaluating values as templates
+{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
+*/}}
+{{- define "common.images.renderPullSecrets" -}}
+  {{- $pullSecrets := list }}
+  {{- $context := .context }}
+
+  {{- if $context.Values.global }}
+    {{- range $context.Values.global.imagePullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- range .images -}}
+    {{- range .pullSecrets -}}
+      {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
+    {{- end -}}
+  {{- end -}}
+
+  {{- if (not (empty $pullSecrets)) }}
+imagePullSecrets:
+    {{- range $pullSecrets | uniq }}
+  - name: {{ . }}
+    {{- end }}
+  {{- end }}
+{{- end -}}

+ 68 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl

@@ -0,0 +1,68 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Generate backend entry that is compatible with all Kubernetes API versions.
+
+Usage:
+{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
+
+Params:
+  - serviceName - String. Name of an existing service backend
+  - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending on whether it is a string or an integer.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.ingress.backend" -}}
+{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
+{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
+serviceName: {{ .serviceName }}
+servicePort: {{ .servicePort }}
+{{- else -}}
+service:
+  name: {{ .serviceName }}
+  port:
+    {{- if typeIs "string" .servicePort }}
+    name: {{ .servicePort }}
+    {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
+    number: {{ .servicePort | int }}
+    {{- end }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Print "true" if the API pathType field is supported
+Usage:
+{{ include "common.ingress.supportsPathType" . }}
+*/}}
+{{- define "common.ingress.supportsPathType" -}}
+{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the ingressClassname field is supported
+Usage:
+{{ include "common.ingress.supportsIngressClassname" . }}
+*/}}
+{{- define "common.ingress.supportsIngressClassname" -}}
+{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "false" -}}
+{{- else -}}
+{{- print "true" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if cert-manager required annotations for TLS signed
+certificates are set in the Ingress annotations
+Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+Usage:
+{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
+*/}}
+{{- define "common.ingress.certManagerRequest" -}}
+{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}

+ 18 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_labels.tpl

@@ -0,0 +1,18 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Kubernetes standard labels
+*/}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+helm.sh/chart: {{ include "common.names.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "common.labels.matchLabels" -}}
+app.kubernetes.io/name: {{ include "common.names.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}

+ 66 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_names.tpl

@@ -0,0 +1,66 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified dependency name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Usage:
+{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
+*/}}
+{{- define "common.names.dependency.fullname" -}}
+{{- if .chartValues.fullnameOverride -}}
+{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .chartName .chartValues.nameOverride -}}
+{{- if contains $name .context.Release.Name -}}
+{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
+*/}}
+{{- define "common.names.namespace" -}}
+{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified app name adding the installation's namespace.
+*/}}
+{{- define "common.names.fullname.namespace" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}

+ 165 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl

@@ -0,0 +1,165 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- if not (typeIs "string" .) -}}
+{{- with .name -}}
+{{- $name = . -}}
+{{- end -}}
+{{- else -}}
+{{- $name = . -}}
+{{- end -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+  - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+    +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
+  - key - String - Required. Name of the key in the secret.
+*/}}
+{{- define "common.secrets.key" -}}
+{{- $key := .key -}}
+
+{{- if .existingSecret -}}
+  {{- if not (typeIs "string" .existingSecret) -}}
+    {{- if .existingSecret.keyMapping -}}
+      {{- $key = index .existingSecret.keyMapping $.key -}}
+    {{- end -}}
+  {{- end }}
+{{- end -}}
+
+{{- printf "%s" $key -}}
+{{- end -}}
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+  - length - int - Optional - Length of the generated random password.
+  - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+  - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+  - context - Context - Required - Parent context.
+
+The order in which this function returns a secret password:
+  1. Already existing 'Secret' resource
+     (If a 'Secret' resource named by the 'secret' parameter exists and contains a key matching the 'key' parameter, the value of that existing secret password is returned)
+  2. Password provided via the values.yaml
+     (If one of the paths passed in 'providedValues' resolves to a non-empty value in the values.yaml, the value of the first such path is returned)
+  3. Randomly generated secret password
+     (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key | quote }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret if present, otherwise falls back to the given default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - defaultValue - String - Required - Default value to use (stored base64-encoded) when the secret or the key does not exist.
+  - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+  {{- $value = index $secretData .key -}}
+{{- else -}}
+  {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
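A sketch of how common.secrets.passwords.manage is typically called from a Secret template, assuming a hypothetical auth.password values key. The helper returns the value already base64-encoded and quoted, so it can be placed directly under data; note that because it relies on lookup, a plain "helm template" run without cluster access always falls through to the random branch:

    apiVersion: v1
    kind: Secret
    metadata:
      name: {{ printf "%s-auth" (include "common.names.fullname" .) }}
    type: Opaque
    data:
      password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-auth" (include "common.names.fullname" .)) "key" "password" "providedValues" (list "auth.password") "length" 16 "context" $) }}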

+ 23 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_storage.tpl

@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" .Values.global) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+    {{- if .global.storageClass -}}
+        {{- $storageClass = .global.storageClass -}}
+    {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+      {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+      {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}
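The sentinel value "-" disables dynamic provisioning by rendering an empty storageClassName. A sketch against a hypothetical persistence block:

    {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }}
    # persistence.storageClass: "standard" -> storageClassName: standard
    # persistence.storageClass: "-"        -> storageClassName: ""
    # unset, no global.storageClass        -> (nothing rendered, cluster default applies)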

+ 13 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl

@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains a template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}
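This helper is what lets values files embed template expressions. For example (values are hypothetical), with podLabels: {tier: "{{ .Release.Name }}-web"} in values.yaml and a release named demo:

    labels: {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 4 }}
    # renders:
    #   labels:
    #     tier: demo-web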

+ 62 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_utils.tpl

@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+  {{- $fieldNameSplit := splitList "-" .field -}}
+  {{- $upperCaseFieldNameSplit := list -}}
+
+  {{- range $fieldNameSplit -}}
+    {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+  {{- end -}}
+
+  {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
+
+{{/*
+Gets a value from .Values given its key path
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+  {{- if not $latestObj -}}
+    {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+  {{- end -}}
+  {{- $value = ( index $latestObj . ) -}}
+  {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}} 
+{{- end -}}
+
+{{/*
+Returns the first .Values key with a defined value, or the first key in the list if none are defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+  {{- if $value -}}
+    {{- $key = . }}
+  {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}} 
+{{- end -}}

+ 14 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl

@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
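The regex treats any bitnami/* tag without an -r<N> revision suffix or an explicit sha256: digest as rolling. Illustrative cases (tags are hypothetical):

    # repository: bitnami/zookeeper, tag: 3.8.1                -> warning emitted
    # repository: bitnami/zookeeper, tag: 3.8.1-debian-11-r25  -> no warning
    # repository: bitnami/zookeeper, tag containing "sha256:"  -> no warning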

+ 72 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl

@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+  {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+  {{- $enabled := include "common.cassandra.values.enabled" . -}}
+  {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+  {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.dbUser.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.cassandra.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+  {{- if .subchart -}}
+    cassandra.dbUser
+  {{- else -}}
+    dbUser
+  {{- end -}}
+{{- end -}}
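The same subchart/parent pattern repeats in the mariadb, mongodb, mysql, postgresql and redis validators below. A sketch of a parent chart invoking this validation with Cassandra deployed as a subchart (the secret name is hypothetical):

    {{- include "common.validations.values.cassandra.passwords" (dict "secret" "my-release-cassandra" "subchart" true "context" $) -}}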

+ 103 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+  {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mariadb.values.enabled" . -}}
+  {{- $architecture := include "common.mariadb.values.architecture" . -}}
+  {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mariadb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mariadb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}

+ 108 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl

@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MongoDB&reg; values are stored, e.g: "mongodb-passwords-secret"
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+  {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mongodb.values.enabled" . -}}
+  {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+  {{- $architecture := include "common.mongodb.values.architecture" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+  {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+  {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+    {{- if and $valueUsername $valueDatabase -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replicaset") -}}
+        {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB&reg; is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}

+ 103 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl

@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MySQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mysql.passwords" -}}
+  {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mysql.values.enabled" . -}}
+  {{- $architecture := include "common.mysql.values.architecture" . -}}
+  {{- $authPrefix := include "common.mysql.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+        {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+        {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mysql.
+
+Usage:
+{{ include "common.mysql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mysql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mysql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mysql.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+  {{- if .subchart -}}
+    mysql.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}

+ 129 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl

@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+        {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+        {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}

+ 76 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl

@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis&reg; required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}

+ 46 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl

@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
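For a hypothetical empty auth.password backed by secret "my-secret" and field "my-password", the message appended by common.validations.values.single.empty would look like:

    'auth.password' must not be empty, please add '--set auth.password=$MY_PASSWORD' to the command. To get the current value:

        export MY_PASSWORD=$(kubectl get secret --namespace "default" my-secret -o jsonpath="{.data.my-password}" | base64 -d)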

+ 5 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/values.yaml

@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart

+ 76 - 0
kafka/helm/kafka/charts/zookeeper/templates/NOTES.txt

@@ -0,0 +1,76 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if and (not .Values.auth.client.enabled) (eq .Values.service.type "LoadBalancer") }}
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "service.type=LoadBalancer" and not specifying "auth.client.enabled=true"
+    you have most likely exposed the ZooKeeper service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As alternative, you can also specify a valid password on the
+    "auth.clientPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing:
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts, execute this command:
+
+    /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh
+
+{{- else }}
+
+ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }}
+
+To connect to your ZooKeeper server, run the following commands:
+
+    export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}")
+    kubectl exec -it $POD_NAME -- zkCli.sh
+
+To connect to your ZooKeeper server from outside the cluster, execute the following commands:
+
+{{- if eq .Values.service.type "NodePort" }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+    zkCli.sh $NODE_IP:$NODE_PORT
+
+{{- else if eq .Values.service.type "LoadBalancer" }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }}
+
+{{- else if eq .Values.service.type "ClusterIP" }}
+
+    kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPorts.client }} &
+    zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }}
+
+{{- end }}
+{{- end }}
+
+{{- include "zookeeper.validateValues" . }}
+{{- include "zookeeper.checkRollingTags" . }}

+ 361 - 0
kafka/helm/kafka/charts/zookeeper/templates/_helpers.tpl

@@ -0,0 +1,361 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the proper ZooKeeper image name
+*/}}
+{{- define "zookeeper.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "zookeeper.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "zookeeper.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Check if there are rolling tags in the images
+*/}}
+{{- define "zookeeper.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- end -}}
+
+{{/*
+Return ZooKeeper Namespace to use
+*/}}
+{{- define "zookeeper.namespace" -}}
+{{- if .Values.namespaceOverride -}}
+    {{- .Values.namespaceOverride -}}
+{{- else -}}
+    {{- .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "zookeeper.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the ZooKeeper client-server authentication credentials secret
+*/}}
+{{- define "zookeeper.client.secretName" -}}
+{{- if .Values.auth.client.existingSecret -}}
+    {{- printf "%s" (tpl .Values.auth.client.existingSecret $) -}}
+{{- else -}}
+    {{- printf "%s-client-auth" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the ZooKeeper server-server authentication credentials secret
+*/}}
+{{- define "zookeeper.quorum.secretName" -}}
+{{- if .Values.auth.quorum.existingSecret -}}
+    {{- printf "%s" (tpl .Values.auth.quorum.existingSecret $) -}}
+{{- else -}}
+    {{- printf "%s-quorum-auth" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a ZooKeeper client-server authentication credentials secret object should be created
+*/}}
+{{- define "zookeeper.client.createSecret" -}}
+{{- if and .Values.auth.client.enabled (empty .Values.auth.client.existingSecret) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a ZooKeeper server-server authentication credentials secret object should be created
+*/}}
+{{- define "zookeeper.quorum.createSecret" -}}
+{{- if and .Values.auth.quorum.enabled (empty .Values.auth.quorum.existingSecret) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the available value for certain key in an existing secret (if it exists),
+otherwise it generates a random value.
+*/}}
+{{- define "getValueFromSecret" }}
+    {{- $len := (default 16 .Length) | int -}}
+    {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}}
+    {{- if $obj }}
+        {{- index $obj .Key | b64dec -}}
+    {{- else -}}
+        {{- randAlphaNum $len -}}
+    {{- end -}}
+{{- end }}
+
+{{/*
+Return the ZooKeeper configuration ConfigMap name
+*/}}
+{{- define "zookeeper.configmapName" -}}
+{{- if .Values.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a ConfigMap object should be created for ZooKeeper configuration
+*/}}
+{{- define "zookeeper.createConfigmap" -}}
+{{- if and .Values.configuration (not .Values.existingConfigmap) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret should be created for ZooKeeper quorum
+*/}}
+{{- define "zookeeper.quorum.createTlsSecret" -}}
+{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing ZooKeeper quorum TLS certificates
+*/}}
+{{- define "zookeeper.quorum.tlsSecretName" -}}
+{{- $secretName := .Values.tls.quorum.existingSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-quorum-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum
+*/}}
+{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}}
+{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Keystore and Truststore password
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordsSecret" -}}
+{{- $secretName := .Values.tls.quorum.passwordsSecretName -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret should be created for ZooKeeper client
+*/}}
+{{- define "zookeeper.client.createTlsSecret" -}}
+{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret containing ZooKeeper client TLS certificates
+*/}}
+{{- define "zookeeper.client.tlsSecretName" -}}
+{{- $secretName := .Values.tls.client.existingSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-client-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsKeystoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.existingSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsTruststoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.existingSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsKeystoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.client.existingSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsTruststoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.client.existingSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client
+*/}}
+{{- define "zookeeper.client.createTlsPasswordsSecret" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Keystore and Truststore password
+*/}}
+{{- define "zookeeper.client.tlsPasswordsSecret" -}}
+{{- $secretName := .Values.tls.client.passwordsSecretName -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore password key to be retrieved from tls.quorum.passwordSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore password key to be retrieved from tls.quorum.passwordSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore password key to be retrieved from tls.client.passwordSecretName.
+*/}}
+{{- define "zookeeper.client.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.client.passwordsSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore password key to be retrieved from tls.client.passwordSecretName.
+*/}}
+{{- define "zookeeper.client.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.client.passwordsSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "zookeeper.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{-   printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.client.auth" -}}
+{{- if and .Values.auth.client.enabled (not .Values.auth.client.existingSecret) (or (not .Values.auth.client.clientUser) (not .Values.auth.client.serverUsers)) }}
+zookeeper: auth.client.enabled
+    In order to enable client-server authentication, you need to provide the list
+    of users to be created and the user to use for clients authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.auth" -}}
+{{- if and .Values.auth.quorum.enabled (not .Values.auth.quorum.existingSecret) (or (not .Values.auth.quorum.learnerUser) (not .Values.auth.quorum.serverUsers)) }}
+zookeeper: auth.quorum.enabled
+    In order to enable server-server authentication, you need to provide the list
+    of users to be created and the user to use for quorum authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.client.tls" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }}
+zookeeper: tls.client.enabled
+    In order to enable Client TLS encryption, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Quorum TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.tls" -}}
+{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }}
+zookeeper: tls.quorum.enabled
+    In order to enable Quorum TLS, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
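A note on getValueFromSecret (defined above): because it consults the live Secret first, repeated helm upgrade runs keep previously generated credentials stable instead of rotating them. A sketch of a call from a hypothetical secret template; the helper returns the plain value, so it must be re-encoded for the data field:

    password: {{ include "getValueFromSecret" (dict "Namespace" (include "zookeeper.namespace" .) "Name" (printf "%s-client-auth" (include "common.names.fullname" .)) "Length" 32 "Key" "client-password") | b64enc | quote }}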

+ 17 - 0
kafka/helm/kafka/charts/zookeeper/templates/configmap.yaml

@@ -0,0 +1,17 @@
+{{- if (include "zookeeper.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  zoo.cfg: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }}
+{{- end }}
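Per zookeeper.createConfigmap, this ConfigMap is only rendered when "configuration" is set and "existingConfigmap" is not. A hypothetical values.yaml snippet that triggers it:

    configuration: |-
      tickTime=2000
      initLimit=10
      syncLimit=5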

+ 4 - 0
kafka/helm/kafka/charts/zookeeper/templates/extra-list.yaml

@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
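extraDeploy lets users ship arbitrary additional manifests with the release; each entry is passed through common.tplvalues.render, so both plain objects and templated strings work. A hypothetical values snippet:

    extraDeploy:
      - |
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: {{ include "common.names.fullname" . }}-extra
        data:
          hello: world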

+ 29 - 0
kafka/helm/kafka/charts/zookeeper/templates/metrics-svc.yaml

@@ -0,0 +1,29 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "common.names.fullname" . }}-metrics
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.metrics.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.metrics.service.type }}
+  ports:
+    - name: tcp-metrics
+      port: {{ .Values.metrics.service.port }}
+      targetPort: metrics
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+{{- end }}
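
This Service is created only when metrics are enabled and forwards its
tcp-metrics port to the container's named metrics port. With the chart's
service defaults left alone, turning it on is a one-liner:

    metrics:
      enabled: true   # exposes the exporter through the <fullname>-metrics Service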

+ 41 - 0
kafka/helm/kafka/charts/zookeeper/templates/networkpolicy.yaml

@@ -0,0 +1,41 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+  policyTypes:
+    - Ingress
+  ingress:
+    # Allow inbound connections to ZooKeeper
+    - ports:
+        - port: {{ .Values.containerPorts.client }}
+        {{- if .Values.metrics.enabled }}
+        - port: {{ .Values.metrics.containerPort }}
+        {{- end }}
+      {{- if not .Values.networkPolicy.allowExternal }}
+      from:
+        - podSelector:
+            matchLabels:
+              {{ include "common.names.fullname" . }}-client: "true"
+        - podSelector:
+            matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+      {{- end }}
+    # Allow internal communications between nodes
+    - ports:
+        - port: {{ .Values.containerPorts.follower }}
+        - port: {{ .Values.containerPorts.election }}
+      from:
+        - podSelector:
+            matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }}
+{{- end }}
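
With networkPolicy.allowExternal set to false, only other ZooKeeper pods and
pods labelled <fullname>-client: "true" may reach the client port. A sketch of
a consumer pod that would be admitted, assuming the release fullname renders
as my-release-zookeeper (both names are illustrative):

    apiVersion: v1
    kind: Pod
    metadata:
      name: zk-consumer
      labels:
        my-release-zookeeper-client: "true"   # label matched by the policy above
    spec:
      containers:
        - name: client
          image: bitnami/zookeeper            # any image with a ZK client works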

+ 26 - 0
kafka/helm/kafka/charts/zookeeper/templates/pdb.yaml

@@ -0,0 +1,26 @@
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and .Values.pdb.create (gt $replicaCount 1) }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.pdb.minAvailable }}
+  minAvailable: {{ .Values.pdb.minAvailable }}
+  {{- end }}
+  {{- if .Values.pdb.maxUnavailable }}
+  maxUnavailable: {{ .Values.pdb.maxUnavailable }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: zookeeper
+{{- end }}
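
Note the PDB is skipped entirely for single-replica installs. For a three-node
ensemble that must keep quorum during node drains, allowing at most one pod
down at a time is the usual setting; a values sketch:

    replicaCount: 3
    pdb:
      create: true
      maxUnavailable: 1   # a quorum of 2/3 survives any single disruption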

+ 27 - 0
kafka/helm/kafka/charts/zookeeper/templates/prometheusrule.yaml

@@ -0,0 +1,27 @@
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  {{- if .Values.metrics.prometheusRule.namespace }}
+  namespace: {{ .Values.metrics.prometheusRule.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.metrics.prometheusRule.additionalLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  groups:
+    - name: {{ include "common.names.fullname" . }}
+      rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }}
+{{- end }}
+
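
The rule group is emitted verbatim from metrics.prometheusRule.rules. A hedged
sketch of one alert (avg_latency is assumed to come from ZooKeeper's built-in
Prometheus metrics provider; verify the name against your actual scrape output
before relying on it):

    metrics:
      enabled: true
      prometheusRule:
        enabled: true
        rules:
          - alert: ZooKeeperHighLatency   # illustrative alert name
            expr: avg_latency > 100       # assumed metric name, see note above
            for: 5m
            labels:
              severity: warning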

+ 102 - 0
kafka/helm/kafka/charts/zookeeper/templates/scripts-configmap.yaml

@@ -0,0 +1,102 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  init-certs.sh: |-
+    #!/bin/bash
+
+    {{- if .Values.tls.client.enabled }}
+    if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then
+        if [[ -f "/opt/bitnami/zookeeper/config/certs/client/.initialized" ]]; then
+            exit 0
+        fi
+        openssl pkcs12 -export -in "/certs/client/tls.crt" \
+          -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+          -inkey "/certs/client/tls.key" \
+          -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+          -srcstoretype PKCS12 \
+          -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+          -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+          -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/client/ca.crt" \
+              -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \
+              -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" \
+              -noprompt
+        touch /opt/bitnami/zookeeper/config/certs/client/.initialized
+    {{- if .Values.tls.client.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks"
+        cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+    {{- if .Values.tls.quorum.enabled }}
+    if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then
+        openssl pkcs12 -export -in "/certs/quorum/tls.crt" \
+          -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+          -inkey "/certs/quorum/tls.key" \
+          -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+          -srcstoretype PKCS12 \
+          -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+          -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+          -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/quorum/ca.crt" \
+              -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \
+              -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \
+              -noprompt
+    {{- if .Values.tls.quorum.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when encryption Quorum via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks"
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+  setup.sh: |-
+    #!/bin/bash
+
+    # Obtain ZOO_SERVER_ID, then execute the entrypoint as usual:
+    # reuse the ID stored in the persistent volume (myid file) if present,
+    # otherwise derive it from the pod hostname's ordinal suffix.
+    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
+        export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
+    else
+        HOSTNAME="$(hostname -s)"
+        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+            ORD=${BASH_REMATCH[2]}
+            export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))"
+        else
+            echo "Failed to get index from hostname $HOSTNAME"
+            exit 1
+        fi
+    fi
+    exec /entrypoint.sh /run.sh
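
The derived ID is the StatefulSet ordinal plus minServerId, and it sticks once
written to the myid file. With the default minServerId of 1, pod <fullname>-2
therefore gets ZOO_SERVER_ID 2 + 1 = 3. To start numbering at 100 instead:

    minServerId: 100   # ordinal 0 -> myid 100, ordinal 1 -> 101, ...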

+ 77 - 0
kafka/helm/kafka/charts/zookeeper/templates/secrets.yaml

@@ -0,0 +1,77 @@
+{{- if (include "zookeeper.client.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-client-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  client-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "client-password" "providedValues" (list "auth.client.clientPassword") "context" $) }}
+  server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "server-password" "providedValues" (list "auth.client.serverPasswords") "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-quorum-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  quorum-learner-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-learner-password" "providedValues" (list "auth.quorum.learnerPassword") "context" $) }}
+  quorum-server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-server-password" "providedValues" (list "auth.quorum.serverPasswords") "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}-client-tls-pass
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }}
+  truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}-quorum-tls-pass
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }}
+  truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }}
+{{- end }}
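
common.secrets.passwords.manage reuses the value already stored in the secret
on upgrades and otherwise takes it from the listed providedValues path,
generating a random password when none is given. To pin the client-server
credentials explicitly rather than relying on generation (user names here are
examples; serverUsers/serverPasswords take comma-separated lists):

    auth:
      client:
        enabled: true
        clientUser: zkclient
        clientPassword: change-me
        serverUsers: zkclient
        serverPasswords: change-me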

+ 21 - 0
kafka/helm/kafka/charts/zookeeper/templates/serviceaccount.yaml

@@ -0,0 +1,21 @@
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "zookeeper.serviceAccountName" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    role: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.serviceAccount.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}

+ 53 - 0
kafka/helm/kafka/charts/zookeeper/templates/servicemonitor.yaml

@@ -0,0 +1,53 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  {{- if .Values.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.metrics.serviceMonitor.additionalLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.metrics.serviceMonitor.jobLabel }}
+  jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      {{- if .Values.metrics.serviceMonitor.selector }}
+      {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
+      {{- end }}
+      app.kubernetes.io/component: metrics
+  endpoints:
+    - port: tcp-metrics
+      path: "/metrics"
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.relabelings }}
+      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
+      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.honorLabels }}
+      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ template "zookeeper.namespace" . }}
+{{- end }}
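
The ServiceMonitor scrapes the tcp-metrics port of the metrics Service and may
be placed in a different namespace than the release, typically wherever the
Prometheus operator watches. A sketch (the monitoring namespace is an
assumption):

    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        namespace: monitoring
        interval: 30s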

+ 532 - 0
kafka/helm/kafka/charts/zookeeper/templates/statefulset.yaml

@@ -0,0 +1,532 @@
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    role: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  podManagementPolicy: {{ .Values.podManagementPolicy }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: zookeeper
+  serviceName: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }}
+  {{- if .Values.updateStrategy }}
+  updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }}
+  {{- end }}
+  template:
+    metadata:
+      annotations:
+        {{- if .Values.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+        {{- if (include "zookeeper.createConfigmap" .) }}
+        checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if or (include "zookeeper.quorum.createSecret" .) (include "zookeeper.client.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+        checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }}
+        checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }}
+        {{- end }}
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: zookeeper
+        {{- if .Values.podLabels }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
+        {{- end }}
+    spec:
+      serviceAccountName: {{ template "zookeeper.serviceAccountName" . }}
+      {{- include "zookeeper.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.priorityClassName }}
+      priorityClassName: {{ .Values.priorityClassName }}
+      {{- end }}
+      {{- if .Values.schedulerName }}
+      schedulerName: {{ .Values.schedulerName }}
+      {{- end }}
+      {{- if .Values.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      initContainers:
+        {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+        - name: volume-permissions
+          image: {{ template "zookeeper.volumePermissions.image" . }}
+          imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+          args:
+            - -ec
+            - |
+              mkdir -p /bitnami/zookeeper
+              chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/zookeeper
+              find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}
+              {{- if .Values.dataLogDir }}
+              mkdir -p {{ .Values.dataLogDir }}
+              chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.dataLogDir }}
+              find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}
+              {{- end }}
+          {{- if .Values.volumePermissions.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/zookeeper
+            {{- if .Values.dataLogDir }}
+            - name: data-log
+              mountPath: {{ .Values.dataLogDir }}
+            {{- end }}
+        {{- end }}
+        {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }}
+        - name: init-certs
+          image: {{ include "zookeeper.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /scripts/init-certs.sh
+          env:
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+            {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }}
+            - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }}
+            - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+            {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+            - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }}
+            - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+          {{- if .Values.tls.resources }}
+          resources: {{- toYaml .Values.tls.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: scripts
+              mountPath: /scripts/init-certs.sh
+              subPath: init-certs.sh
+            {{- if .Values.tls.client.enabled }}
+            - name: client-certificates
+              mountPath: /certs/client
+            - name: client-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/client
+            {{- end }}
+            {{- if .Values.tls.quorum.enabled }}
+            - name: quorum-certificates
+              mountPath: /certs/quorum
+            - name: quorum-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/quorum
+            {{- end }}
+        {{- end }}
+        {{- if .Values.initContainers }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }}
+        {{- end }}
+      containers:
+        - name: zookeeper
+          image: {{ template "zookeeper.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: ZOO_DATA_LOG_DIR
+              value: {{ .Values.dataLogDir | quote }}
+            - name: ZOO_PORT_NUMBER
+              value: {{ .Values.containerPorts.client | quote }}
+            - name: ZOO_TICK_TIME
+              value: {{ .Values.tickTime | quote }}
+            - name: ZOO_INIT_LIMIT
+              value: {{ .Values.initLimit | quote }}
+            - name: ZOO_SYNC_LIMIT
+              value: {{ .Values.syncLimit | quote }}
+            - name: ZOO_PRE_ALLOC_SIZE
+              value: {{ .Values.preAllocSize | quote }}
+            - name: ZOO_SNAPCOUNT
+              value: {{ .Values.snapCount | quote }}
+            - name: ZOO_MAX_CLIENT_CNXNS
+              value: {{ .Values.maxClientCnxns | quote }}
+            - name: ZOO_4LW_COMMANDS_WHITELIST
+              value: {{ .Values.fourlwCommandsWhitelist | quote }}
+            - name: ZOO_LISTEN_ALLIPS_ENABLED
+              value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }}
+            - name: ZOO_AUTOPURGE_INTERVAL
+              value: {{ .Values.autopurge.purgeInterval | quote }}
+            - name: ZOO_AUTOPURGE_RETAIN_COUNT
+              value: {{ .Values.autopurge.snapRetainCount | quote }}
+            - name: ZOO_MAX_SESSION_TIMEOUT
+              value: {{ .Values.maxSessionTimeout | quote }}
+            - name: ZOO_SERVERS
+              {{- $replicaCount := int .Values.replicaCount }}
+              {{- $minServerId := int .Values.minServerId }}
+              {{- $followerPort := int .Values.containerPorts.follower }}
+              {{- $electionPort := int .Values.containerPorts.election }}
+              {{- $releaseNamespace := include "zookeeper.namespace" . }}
+              {{- $zookeeperFullname := include "common.names.fullname" . }}
+              {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }}
+              {{- $clusterDomain := .Values.clusterDomain }}
+              value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }}
+            - name: ZOO_ENABLE_AUTH
+              value: {{ ternary "yes" "no" .Values.auth.client.enabled | quote }}
+            {{- if .Values.auth.client.enabled }}
+            - name: ZOO_CLIENT_USER
+              value: {{ .Values.auth.client.clientUser | quote }}
+            - name: ZOO_CLIENT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.secretName" . }}
+                  key: client-password
+            - name: ZOO_SERVER_USERS
+              value: {{ .Values.auth.client.serverUsers | quote }}
+            - name: ZOO_SERVER_PASSWORDS
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.secretName" . }}
+                  key: server-password
+            {{- end }}
+            - name: ZOO_ENABLE_QUORUM_AUTH
+              value: {{ ternary "yes" "no" .Values.auth.quorum.enabled | quote }}
+            {{- if .Values.auth.quorum.enabled }}
+            - name: ZOO_QUORUM_LEARNER_USER
+              value: {{ .Values.auth.quorum.learnerUser | quote }}
+            - name: ZOO_QUORUM_LEARNER_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.secretName" . }}
+                  key: quorum-learner-password
+            - name: ZOO_QUORUM_SERVER_USERS
+              value: {{ .Values.auth.quorum.serverUsers | quote }}
+            - name: ZOO_QUORUM_SERVER_PASSWORDS
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.secretName" . }}
+                  key: quorum-server-password
+            {{- end }}
+            - name: ZOO_HEAP_SIZE
+              value: {{ .Values.heapSize | quote }}
+            - name: ZOO_LOG_LEVEL
+              value: {{ .Values.logLevel | quote }}
+            - name: ALLOW_ANONYMOUS_LOGIN
+              value: {{ ternary "no" "yes" .Values.auth.client.enabled | quote }}
+            {{- if .Values.jvmFlags }}
+            - name: JVMFLAGS
+              value: {{ .Values.jvmFlags | quote }}
+            {{- end }}
+            {{- if .Values.metrics.enabled }}
+            - name: ZOO_ENABLE_PROMETHEUS_METRICS
+              value: "yes"
+            - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER
+              value: {{ .Values.metrics.containerPort | quote }}
+            {{- end }}
+            {{- if .Values.tls.client.enabled }}
+            - name: ZOO_TLS_PORT_NUMBER
+              value: {{ .Values.containerPorts.tls | quote }}
+            - name: ZOO_TLS_CLIENT_ENABLE
+              value: {{ .Values.tls.client.enabled | quote }}
+            - name: ZOO_TLS_CLIENT_AUTH
+              value: {{ .Values.tls.client.auth | quote }}
+            - name: ZOO_TLS_CLIENT_KEYSTORE_FILE
+              value: {{ .Values.tls.client.keystorePath | quote }}
+            - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE
+              value: {{ .Values.tls.client.truststorePath | quote }}
+            {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }}
+            - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }}
+            {{- end }}
+            {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }}
+            - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.client.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+            {{- end }}
+            {{- if .Values.tls.quorum.enabled }}
+            - name: ZOO_TLS_QUORUM_ENABLE
+              value: {{ .Values.tls.quorum.enabled | quote }}
+            - name: ZOO_TLS_QUORUM_CLIENT_AUTH
+              value: {{ .Values.tls.quorum.auth | quote }}
+            - name: ZOO_TLS_QUORUM_KEYSTORE_FILE
+              value: {{ .Values.tls.quorum.keystorePath | quote }}
+            - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE
+              value: {{ .Values.tls.quorum.truststorePath | quote }}
+            {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }}
+            - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }}
+            {{- end }}
+            {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }}
+            - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }}
+                  key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }}
+            {{- end }}
+            {{- end }}
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+            {{- if .Values.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }}
+            {{- end }}
+            {{- if .Values.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }}
+            {{- end }}
+          {{- end }}
+          ports:
+            {{- if not .Values.service.disableBaseClientPort }}
+            - name: client
+              containerPort: {{ .Values.containerPorts.client }}
+            {{- end }}
+            {{- if .Values.tls.client.enabled }}
+            - name: client-tls
+              containerPort: {{ .Values.containerPorts.tls }}
+            {{- end }}
+            - name: follower
+              containerPort: {{ .Values.containerPorts.follower }}
+            - name: election
+              containerPort: {{ .Values.containerPorts.election }}
+            {{- if .Values.metrics.enabled }}
+            - name: metrics
+              containerPort: {{ .Values.metrics.containerPort }}
+            {{- end }}
+          {{- if not .Values.diagnosticMode.enabled }}
+          {{- if .Values.customLivenessProbe }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.livenessProbe.enabled }}
+          livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }}
+            exec:
+              {{- if not .Values.service.disableBaseClientPort }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok']
+              {{- else if not .Values.tls.client.enabled }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok']
+              {{- else }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok']
+              {{- end }}
+          {{- end }}
+          {{- if .Values.customReadinessProbe }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
+          {{- else if .Values.readinessProbe.enabled }}
+          readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }}
+            exec:
+              {{- if not .Values.service.disableBaseClientPort }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok']
+              {{- else if not .Values.tls.client.enabled }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok']
+              {{- else }}
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok']
+              {{- end }}
+          {{- end }}
+          {{- if .Values.customStartupProbe }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
+          {{- else if .Values.startupProbe.enabled }}
+          startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }}
+            tcpSocket:
+              {{- if not .Values.service.disableBaseClientPort }}
+              port: client
+              {{- else }}
+              port: follower
+              {{- end }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.lifecycleHooks }}
+          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: scripts
+              mountPath: /scripts/setup.sh
+              subPath: setup.sh
+            - name: data
+              mountPath: /bitnami/zookeeper
+            {{- if .Values.dataLogDir }}
+            - name: data-log
+              mountPath: {{ .Values.dataLogDir }}
+            {{- end }}
+            {{- if or .Values.configuration .Values.existingConfigmap }}
+            - name: config
+              mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg
+              subPath: zoo.cfg
+            {{- end }}
+            {{- if .Values.tls.client.enabled }}
+            - name: client-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/client
+              readOnly: true
+            {{- end }}
+            {{- if .Values.tls.quorum.enabled }}
+            - name: quorum-shared-certs
+              mountPath: /opt/bitnami/zookeeper/config/certs/quorum
+              readOnly: true
+            {{- end }}
+            {{- if .Values.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.sidecars }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }}
+        {{- end }}
+      volumes:
+        - name: scripts
+          configMap:
+            name: {{ printf "%s-scripts" (include "common.names.fullname" .) }}
+            defaultMode: 0755
+        {{- if or .Values.configuration .Values.existingConfigmap }}
+        - name: config
+          configMap:
+            name: {{ include "zookeeper.configmapName" . }}
+        {{- end }}
+        {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}
+        - name: data
+          persistentVolumeClaim:
+            claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }}
+        {{- else if not .Values.persistence.enabled }}
+        - name: data
+          emptyDir: {}
+        {{- end }}
+        {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }}
+        - name: data-log
+          persistentVolumeClaim:
+            claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }}
+        {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }}
+        - name: data-log
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.tls.client.enabled }}
+        - name: client-certificates
+          secret:
+            secretName: {{ include "zookeeper.client.tlsSecretName" . }}
+            defaultMode: 256
+        - name: client-shared-certs
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.tls.quorum.enabled }}
+        - name: quorum-certificates
+          secret:
+            secretName: {{ include "zookeeper.quorum.tlsSecretName" . }}
+            defaultMode: 256
+        - name: quorum-shared-certs
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.extraVolumes }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
+        {{- end }}
+  {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }}
+  volumeClaimTemplates:
+    {{- if not .Values.persistence.existingClaim }}
+    - metadata:
+        name: data
+        {{- if .Values.persistence.annotations }}
+        annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
+        {{- end }}
+        {{- if .Values.persistence.labels }}
+        labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+        {{- range .Values.persistence.accessModes }}
+          - {{ . | quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.persistence.size | quote }}
+        {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }}
+        {{- if .Values.persistence.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }}
+        {{- end }}
+    {{- end }}
+    {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }}
+    - metadata:
+        name: data-log
+        {{- if .Values.persistence.annotations }}
+        annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
+        {{- end }}
+        {{- if .Values.persistence.labels }}
+        labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+        {{- range .Values.persistence.accessModes }}
+          - {{ . | quote }}
+        {{- end }}
+        resources:
+          requests:
+            storage: {{ .Values.persistence.dataLogDir.size | quote }}
+        {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }}
+        {{- if .Values.persistence.dataLogDir.selector }}
+        selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }}
+        {{- end }}
+    {{- end }}
+  {{- end }}
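
Most of the StatefulSet above is values-driven. A common production-leaning
combination is persistent data plus a dedicated transaction-log volume, which
only materialises when dataLogDir is set (the path shown is an example):

    dataLogDir: /bitnami/zookeeper/dataLog   # enables the data-log volume/claim
    persistence:
      enabled: true
      size: 8Gi
      dataLogDir:
        size: 8Gi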

+ 42 - 0
kafka/helm/kafka/charts/zookeeper/templates/svc-headless.yaml

@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.commonAnnotations .Values.service.headless.annotations }}
+  annotations:
+    {{- if .Values.service.headless.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }}
+  ports:
+    {{- if not .Values.service.disableBaseClientPort }}
+    - name: tcp-client
+      port: {{ .Values.service.ports.client }}
+      targetPort: client
+    {{- end }}
+    {{- if .Values.tls.client.enabled }}
+    - name: tcp-client-tls
+      port: {{ .Values.service.ports.tls }}
+      targetPort: client-tls
+    {{- end }}
+    - name: tcp-follower
+      port: {{ .Values.service.ports.follower }}
+      targetPort: follower
+    - name: tcp-election
+      port: {{ .Values.service.ports.election }}
+      targetPort: election
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper

+ 71 - 0
kafka/helm/kafka/charts/zookeeper/templates/svc.yaml

@@ -0,0 +1,71 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.commonAnnotations .Values.service.annotations }}
+  annotations:
+    {{- if .Values.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.service.type }}
+  {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }}
+  clusterIP: {{ .Values.service.clusterIP }}
+  {{- end }}
+  {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }}
+  loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }}
+  {{- end }}
+  {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.service.sessionAffinity }}
+  {{- end }}
+  {{- if .Values.service.sessionAffinityConfig }}
+  sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }}
+  {{- end }}
+  ports:
+    {{- if not .Values.service.disableBaseClientPort }}
+    - name: tcp-client
+      port: {{ .Values.service.ports.client }}
+      targetPort: client
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }}
+      nodePort: {{ .Values.service.nodePorts.client }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    {{- if .Values.tls.client.enabled }}
+    - name: tcp-client-tls
+      port: {{ .Values.service.ports.tls }}
+      targetPort: client-tls
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }}
+      nodePort: {{ .Values.service.nodePorts.tls }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
+    - name: tcp-follower
+      port: {{ .Values.service.ports.follower }}
+      targetPort: follower
+    - name: tcp-election
+      port: {{ .Values.service.ports.election }}
+      targetPort: election
+    {{- if .Values.service.extraPorts }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
+    {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper

+ 57 - 0
kafka/helm/kafka/charts/zookeeper/templates/tls-secrets.yaml

@@ -0,0 +1,57 @@
+{{- if (include "zookeeper.client.createTlsSecret" .) }}
+{{- $secretName := printf "%s-client-crt" (include "common.names.fullname" .) }}
+{{- $ca := genCA "zookeeper-client-ca" 365 }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $serviceName := include "common.names.fullname" . }}
+{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }}
+{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createTlsSecret" .) }}
+{{- $secretName := printf "%s-quorum-crt" (include "common.names.fullname" .) }}
+{{- $ca := genCA "zookeeper-quorum-ca" 365 }}
+{{- $releaseNamespace := .Release.Namespace }}
+{{- $clusterDomain := .Values.clusterDomain }}
+{{- $fullname := include "common.names.fullname" . }}
+{{- $serviceName := include "common.names.fullname" . }}
+{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }}
+{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }}
+{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
+  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
+  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
+{{- end }}
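
Auto-generation relies on Helm's genCA/genSignedCert plus the
common.secrets.lookup helper, so an already-generated certificate is reused on
upgrade rather than rotated. To bring your own certificates, create a
compatible secret and reference it (secret name and file layout below are
illustrative):

    # kubectl create secret generic zk-client-tls \
    #   --from-file=tls.crt --from-file=tls.key --from-file=ca.crt
    tls:
      client:
        enabled: true
        existingSecret: zk-client-tls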

+ 879 - 0
kafka/helm/kafka/charts/zookeeper/values.yaml

@@ -0,0 +1,879 @@
+## @section Global parameters
+## Global Docker image parameters
+## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+  imageRegistry: ""
+  ## E.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Extra objects to deploy (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## @param namespaceOverride Override namespace for ZooKeeper resources
+## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent
+##
+namespaceOverride: ""
+
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the statefulset
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the statefulset
+  ##
+  args:
+    - infinity
+
+## @section ZooKeeper chart parameters
+
+## Bitnami ZooKeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+## @param image.registry ZooKeeper image registry
+## @param image.repository ZooKeeper image repository
+## @param image.tag ZooKeeper image tag (immutable tags are recommended)
+## @param image.digest ZooKeeper image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy ZooKeeper image pull policy
+## @param image.pullSecrets Specify docker-registry secret names as an array
+## @param image.debug Specify if debug values should be set
+##
+image:
+  registry: docker.io
+  repository: bitnami/zookeeper
+  tag: 3.8.1-debian-11-r36
+  digest: ""
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Set to true if you would like to see extra information in the logs
+  ##
+  debug: false
+## Authentication parameters
+##
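+## E.g. to enable client-server authentication (the credentials below are
+## placeholders for illustration, not chart defaults):
+## auth:
+##   client:
+##     enabled: true
+##     clientUser: "zkClient"
+##     clientPassword: "zkClientPassword"
+##     serverUsers: "zkClient"
+##     serverPasswords: "zkClientPassword"
+##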
+auth:
+  client:
+    ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.client.clientUser User that ZooKeeper clients will use to authenticate
+    ##
+    clientUser: ""
+    ## @param auth.client.clientPassword Password that ZooKeeper clients will use to authenticate
+    ##
+    clientPassword: ""
+    ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.client.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
+  quorum:
+    ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ## Note: Make sure the user is included in auth.quorum.serverUsers
+    ##
+    learnerUser: ""
+    ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ##
+    learnerPassword: ""
+    ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers.
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
+## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+## @param initLimit Limit on the amount of time (in ticks) the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+## @param syncLimit How far out of date a server can be from a leader
+##
+syncLimit: 5
+## @param preAllocSize Block size for transaction log file
+##
+preAllocSize: 65536
+## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled)
+##
+snapCount: 100000
+## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate
+## Defaults to 20 times the tickTime
+##
+maxSessionTimeout: 40000
+## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms)
+## This env var is ignored if Xmx and Xms are configured via `jvmFlags`
+##
+heapSize: 1024
+## @param fourlwCommandsWhitelist A comma-separated list of Four Letter Word commands that can be executed
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+## @param minServerId Minimal SERVER_ID value
+## Servers increment their IDs starting at this minimal value.
+## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively.
+##
+minServerId: 1
+## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses
+##
+listenOnAllIPs: false
+## Ongoing data directory cleanup configuration
+##
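+## E.g. to purge old snapshots daily while keeping the three most recent ones
+## (illustrative values):
+## autopurge:
+##   snapRetainCount: 3
+##   purgeInterval: 24
+##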
+autopurge:
+  ## @param autopurge.snapRetainCount The number of most recent snapshots (and corresponding transaction logs) to retain
+  ##
+  snapRetainCount: 3
+  ## @param autopurge.purgeInterval The time interval (in hours) at which the purge task is triggered
+  ## Set to a positive integer to enable the auto purging
+  ##
+  purgeInterval: 0
+## @param logLevel Log level for the ZooKeeper server. ERROR by default
+## Keep in mind that setting it to INFO or WARN will make the readiness probe produce a lot of logs
+##
+logLevel: ERROR
+## @param jvmFlags Default JVM flags for the ZooKeeper process
+##
+jvmFlags: ""
+## @param dataLogDir Dedicated data log directory
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## E.g.
+## dataLogDir: /bitnami/zookeeper/dataLog
+##
+dataLogDir: ""
+## @param configuration Configure ZooKeeper with a custom zoo.cfg file
+## e.g:
+## configuration: |-
+##   tickTime=2000
+##   maxClientCnxns=60
+##   ...
+##
+configuration: ""
+## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper
+## NOTE: When it's set the `configuration` parameter is ignored
+##
+existingConfigmap: ""
+## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes
+## e.g:
+## extraEnvVars:
+##   - name: FOO
+##     value: "bar"
+##
+extraEnvVars: []
+## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes
+##
+extraEnvVarsCM: ""
+## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes
+##
+extraEnvVarsSecret: ""
+## @param command Override default container command (useful when using custom images)
+##
+command:
+  - /scripts/setup.sh
+## @param args Override default container args (useful when using custom images)
+##
+args: []
+
+## @section Statefulset parameters
+
+## @param replicaCount Number of ZooKeeper nodes
+##
+replicaCount: 1
+## @param containerPorts.client ZooKeeper client container port
+## @param containerPorts.tls ZooKeeper TLS container port
+## @param containerPorts.follower ZooKeeper follower container port
+## @param containerPorts.election ZooKeeper election container port
+##
+containerPorts:
+  client: 2181
+  tls: 3181
+  follower: 2888
+  election: 3888
+## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers
+## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+## @param livenessProbe.periodSeconds Period seconds for livenessProbe
+## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
+## @param livenessProbe.successThreshold Success threshold for livenessProbe
+## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers
+## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+## @param readinessProbe.periodSeconds Period seconds for readinessProbe
+## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
+## @param readinessProbe.successThreshold Success threshold for readinessProbe
+## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe
+##
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers
+## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+## @param startupProbe.periodSeconds Period seconds for startupProbe
+## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
+## @param startupProbe.failureThreshold Failure threshold for startupProbe
+## @param startupProbe.successThreshold Success threshold for startupProbe
+##
+startupProbe:
+  enabled: false
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 1
+  failureThreshold: 15
+  successThreshold: 1
+## @param customLivenessProbe Custom livenessProbe that overrides the default one
+##
+customLivenessProbe: {}
+## @param customReadinessProbe Custom readinessProbe that overrides the default one
+##
+customReadinessProbe: {}
+## @param customStartupProbe Custom startupProbe that overrides the default one
+##
+customStartupProbe: {}
+## @param lifecycleHooks for the ZooKeeper container(s) to automate configuration before or after startup
+##
+lifecycleHooks: {}
+## ZooKeeper resource requests and limits
+## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resources limits for the ZooKeeper containers
+## @param resources.requests.memory The requested memory for the ZooKeeper containers
+## @param resources.requests.cpu The requested cpu for the ZooKeeper containers
+##
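+## E.g. to also cap resource usage (illustrative values; size them to your workload):
+## resources:
+##   limits:
+##     memory: 1Gi
+##     cpu: 500m
+##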
+resources:
+  limits: {}
+  requests:
+    memory: 256Mi
+    cpu: 250m
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enabled ZooKeeper pods' Security Context
+## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enabled ZooKeeper containers' Security Context
+## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot
+## @param containerSecurityContext.allowPrivilegeEscalation Prevent the container's child processes from gaining additional privileges
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  allowPrivilegeEscalation: false
+## @param hostAliases ZooKeeper pods host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param podLabels Extra labels for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Annotations for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ##
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## key: "kubernetes.io/e2e-az-name"
+  ##
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
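+## E.g. to spread replicas across availability zones (illustrative; adjust the
+## labelSelector to the labels of your release):
+## topologySpreadConstraints:
+##   - maxSkew: 1
+##     topologyKey: topology.kubernetes.io/zone
+##     whenUnsatisfiable: ScheduleAnyway
+##     labelSelector:
+##       matchLabels:
+##         app.kubernetes.io/name: zookeeper
+##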
+topologySpreadConstraints: []
+## @param podManagementPolicy The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel`
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods; the priority class needs to be created beforehand
+## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+priorityClassName: ""
+## @param schedulerName Name of the Kubernetes scheduler to use for ZooKeeper pods (other than the default)
+## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param updateStrategy.type ZooKeeper statefulset strategy type
+## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate: {}
+## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumes:
+## - name: zookeeper-keystore
+##   secret:
+##     defaultMode: 288
+##     secretName: zookeeper-keystore
+## - name: zookeeper-truststore
+##   secret:
+##     defaultMode: 288
+##     secretName: zookeeper-truststore
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumeMounts:
+## - name: zookeeper-keystore
+##   mountPath: /certs/keystore
+##   readOnly: true
+## - name: zookeeper-truststore
+##   mountPath: /certs/truststore
+##   readOnly: true
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s)
+## e.g:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the ZooKeeper pod(s)
+## Example:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+initContainers: []
+## ZooKeeper Pod Disruption Budget
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+## @param pdb.create Deploy a pdb object for the ZooKeeper pod
+## @param pdb.minAvailable Minimum available ZooKeeper replicas
+## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas
+##
+pdb:
+  create: false
+  minAvailable: ""
+  maxUnavailable: 1
+
+## @section Traffic Exposure parameters
+
+service:
+  ## @param service.type Kubernetes Service type
+  ##
+  type: ClusterIP
+  ## @param service.ports.client ZooKeeper client service port
+  ## @param service.ports.tls ZooKeeper TLS service port
+  ## @param service.ports.follower ZooKeeper follower service port
+  ## @param service.ports.election ZooKeeper election service port
+  ##
+  ports:
+    client: 2181
+    tls: 3181
+    follower: 2888
+    election: 3888
+  ## Node ports to expose
+  ## NOTE: choose a port in the range 30000-32767
+  ## @param service.nodePorts.client Node port for clients
+  ## @param service.nodePorts.tls Node port for TLS
+  ##
+  nodePorts:
+    client: ""
+    tls: ""
+  ## @param service.disableBaseClientPort Remove client port from service definitions.
+  ##
+  disableBaseClientPort: false
+  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
+  ## Values: ClientIP or None
+  ## ref: https://kubernetes.io/docs/user-guide/services/
+  ##
+  sessionAffinity: None
+  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
+  ## sessionAffinityConfig:
+  ##   clientIP:
+  ##     timeoutSeconds: 300
+  ##
+  sessionAffinityConfig: {}
+  ## @param service.clusterIP ZooKeeper service Cluster IP
+  ## e.g.:
+  ## clusterIP: None
+  ##
+  clusterIP: ""
+  ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP
+  ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  loadBalancerIP: ""
+  ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ## e.g:
+  ## loadBalancerSourceRanges:
+  ##   - 10.10.10.0/24
+  ##
+  loadBalancerSourceRanges: []
+  ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy
+  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## @param service.annotations Additional custom annotations for ZooKeeper service
+  ##
+  annotations: {}
+  ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value)
+  ##
+  extraPorts: []
+  ## @param service.headless.annotations Annotations for the Headless Service
+  ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods
+  ## @param service.headless.servicenameOverride String to partially override headless service name
+  ##
+  headless:
+    publishNotReadyAddresses: true
+    annotations: {}
+    servicenameOverride: ""
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is
+  ## listening on. When true, ZooKeeper accepts connections from any source (on the correct destination port).
+  ##
+  allowExternal: true
+
+## @section Other Parameters
+
+## Service account for ZooKeeper to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod
+  ##
+  create: false
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the common.names.fullname template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+  ## Can be set to false if pods using this serviceAccount do not need to use the K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+
+## @section Persistence parameters
+
+## Enable persistence using Persistent Volume Claims
+## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir
+  ##
+  enabled: true
+  ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica)
+  ##
+  existingClaim: ""
+  ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  storageClass: ""
+  ## @param persistence.accessModes PVC Access modes
+  ##
+  accessModes:
+    - ReadWriteOnce
+  ## @param persistence.size PVC Storage Request for ZooKeeper data volume
+  ##
+  size: 8Gi
+  ## @param persistence.annotations Annotations for the PVC
+  ##
+  annotations: {}
+  ## @param persistence.labels Labels for the PVC
+  ##
+  labels: {}
+  ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC
+  ## If set, the PVC can't have a PV dynamically provisioned for it
+  ## E.g.
+  ## selector:
+  ##   matchLabels:
+  ##     app: my-app
+  ##
+  selector: {}
+  ## Persistence for a dedicated data log directory
+  ##
+  dataLogDir:
+    ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory
+    ##
+    size: 8Gi
+    ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory
+    ## If defined, PVC must be created manually before volume will be bound
+    ## The value is evaluated as a template
+    ##
+    existingClaim: ""
+    ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC
+    ## If set, the PVC can't have a PV dynamically provisioned for it
+    ## E.g.
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 11-debian-11-r118
+    digest: ""
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the data folder is chown'ed to containerSecurityContext.runAsUser,
+  ## not to the volumePermissions.containerSecurityContext.runAsUser below
+  ## @param volumePermissions.containerSecurityContext.enabled Enabled init container Security Context
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 0
+
+## @section Metrics parameters
+##
+
+## ZooKeeper Prometheus Exporter configuration
+##
+metrics:
+  ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint
+  ##
+  enabled: false
+  ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port
+  ##
+  containerPort: 9141
+  ## Service configuration
+  ##
+  service:
+    ## @param metrics.service.type ZooKeeper Prometheus Exporter service type
+    ##
+    type: ClusterIP
+    ## @param metrics.service.port ZooKeeper Prometheus Exporter service port
+    ##
+    port: 9141
+    ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "{{ .Values.metrics.service.port }}"
+      prometheus.io/path: "/metrics"
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
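+    ## E.g. if your Prometheus Operator instance selects ServiceMonitors by a `release`
+    ## label (this depends on your Prometheus configuration):
+    ## additionalLabels:
+    ##   release: prometheus
+    ##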
+    additionalLabels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter for the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
+    ##
+    jobLabel: ""
+  ## Prometheus Operator PrometheusRule configuration
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ##  - alert: ZooKeeperSyncedFollowers
+    ##    annotations:
+    ##      message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself, you can try killing the pods (one by one).
+    ##    expr: max(synced_followers{service="my-release-metrics"}) < 2
+    ##    for: 5m
+    ##    labels:
+    ##      severity: critical
+    ##  - alert: ZooKeeperOutstandingRequests
+    ##    annotations:
+    ##      message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+    ##    expr: outstanding_requests{service="my-release-metrics"} > 10
+    ##    for: 5m
+    ##    labels:
+    ##      severity: critical
+    ##
+    rules: []
+
+## @section TLS/SSL parameters
+##
+
+## Enable SSL/TLS encryption
+##
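+## E.g. to enable TLS for client connections using a pre-created secret
+## (the secret names below are placeholders):
+## tls:
+##   client:
+##     enabled: true
+##     existingSecret: "zookeeper-client-tls"
+##     passwordsSecretName: "zookeeper-tls-passwords"
+##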
+tls:
+  client:
+    ## @param tls.client.enabled Enable TLS for client connections
+    ##
+    enabled: false
+    ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need".
+    ##
+    auth: "none"
+    ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications
+    ## Currently only supports PEM certificates
+    ##
+    autoGenerated: false
+    ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications
+    ##
+    existingSecret: ""
+    ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore.
+    ##
+    existingSecretKeystoreKey: ""
+    ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore.
+    ##
+    existingSecretTruststoreKey: ""
+    ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections
+    ##
+    keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
+    ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections
+    ##
+    truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
+    ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords
+    ##
+    passwordsSecretName: ""
+    ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore.
+    ##
+    passwordsSecretKeystoreKey: ""
+    ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore.
+    ##
+    passwordsSecretTruststoreKey: ""
+    ## @param tls.client.keystorePassword Password to access KeyStore if needed
+    ##
+    keystorePassword: ""
+    ## @param tls.client.truststorePassword Password to access TrustStore if needed
+    ##
+    truststorePassword: ""
+  quorum:
+    ## @param tls.quorum.enabled Enable TLS for quorum protocol
+    ##
+    enabled: false
+    ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need".
+    ##
+    auth: "none"
+    ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates.
+    ##
+    autoGenerated: false
+    ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol
+    ##
+    existingSecret: ""
+    ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore.
+    ##
+    existingSecretKeystoreKey: ""
+    ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore.
+    ##
+    existingSecretTruststoreKey: ""
+    ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol
+    ##
+    keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
+    ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol
+    ##
+    truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
+    ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords
+    ##
+    passwordsSecretName: ""
+    ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore.
+    ##
+    passwordsSecretKeystoreKey: ""
+    ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore.
+    ##
+    passwordsSecretTruststoreKey: ""
+    ## @param tls.quorum.keystorePassword Password to access KeyStore if needed
+    ##
+    keystorePassword: ""
+    ## @param tls.quorum.truststorePassword Password to access TrustStore if needed
+    ##
+    truststorePassword: ""
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param tls.resources.limits The resources limits for the TLS init container
+  ## @param tls.resources.requests The requested resources for the TLS init container
+  ##
+  resources:
+    limits: {}
+    requests: {}

+ 314 - 0
kafka/helm/kafka/templates/NOTES.txt

@@ -0,0 +1,314 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overridden with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts, execute this command:
+
+    /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh
+
+{{- else }}
+
+{{- $replicaCount := int .Values.replicaCount -}}
+{{- $releaseNamespace := .Release.Namespace -}}
+{{- $clusterDomain := .Values.clusterDomain -}}
+{{- $fullname := include "common.names.fullname" . -}}
+{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}}
+{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}}
+{{- $saslMechanisms := .Values.auth.sasl.mechanisms -}}
+{{- $tlsEndpointIdentificationAlgorithm := default "" .Values.auth.tls.endpointIdentificationAlgorithm -}}
+{{- $tlsPasswordSecret := printf "$(kubectl get secret %s --namespace %s -o jsonpath='{.data.password}' | base64 -d | cut -d , -f 1)" .Values.auth.tls.existingSecret $releaseNamespace -}}
+{{- $tlsPassword :=  ternary .Values.auth.tls.password $tlsPasswordSecret (eq .Values.auth.tls.existingSecret "") -}}
+{{- $servicePort := int .Values.service.ports.client -}}
+
+{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $externalClientProtocol "PLAINTEXT") }}
+---------------------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "serviceType=LoadBalancer" without configuring authentication,
+    you have most likely exposed the Kafka service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As alternative, you can also configure the Kafka authentication.
+
+---------------------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster:
+
+    {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}
+
+Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster:
+
+{{- $brokerList := list }}
+{{- range $e, $i := until $replicaCount }}
+{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }}
+{{- end }}
+{{ join "\n" $brokerList | nindent 4 }}
+{{- if (include "kafka.client.saslAuthentication" .) }}
+
+You need to configure your Kafka client to connect using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below:
+
+    - kafka_jaas.conf:
+
+KafkaClient {
+{{- if $saslMechanisms | regexFind "scram" }}
+org.apache.kafka.common.security.scram.ScramLoginModule required
+{{- else }}
+org.apache.kafka.common.security.plain.PlainLoginModule required
+{{- end }}
+username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}"
+password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)";
+};
+
+    - client.properties:
+
+security.protocol={{ $clientProtocol }}
+{{- if $saslMechanisms | regexFind "scram-sha-256" }}
+sasl.mechanism=SCRAM-SHA-256
+{{- else if $saslMechanisms | regexFind "scram-sha-512" }}
+sasl.mechanism=SCRAM-SHA-512
+{{- else }}
+sasl.mechanism=PLAIN
+{{- end }}
+{{- if eq $clientProtocol "SASL_SSL" }}
+ssl.truststore.type={{ upper .Values.auth.tls.type }}
+    {{- if eq .Values.auth.tls.type "jks" }}
+ssl.truststore.location=/tmp/kafka.truststore.jks
+        {{- if not (empty $tlsPassword) }}
+ssl.truststore.password={{ $tlsPassword }}
+        {{- end }}
+    {{- else if eq .Values.auth.tls.type "pem" }}
+ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+    {{- end }}
+    {{- if eq $tlsEndpointIdentificationAlgorithm "" }}
+ssl.endpoint.identification.algorithm=
+    {{- end }}
+{{- end }}
+
+{{- else if (include "kafka.client.tlsEncryption" .) }}
+
+You need to configure your Kafka client to connect using TLS authentication. To do so, you need to create the 'client.properties' configuration file with the content below:
+
+security.protocol={{ $clientProtocol }}
+ssl.truststore.type={{ upper .Values.auth.tls.type }}
+{{- if eq .Values.auth.tls.type "jks" }}
+ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }}
+    {{- if not (empty $tlsPassword) }}
+ssl.truststore.password={{ $tlsPassword }}
+    {{- end }}
+{{- else if eq .Values.auth.tls.type "pem" }}
+ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+{{- end }}
+{{- if eq .Values.auth.clientProtocol "mtls" }}
+ssl.keystore.type={{ upper .Values.auth.tls.type }}
+    {{- if eq .Values.auth.tls.type "jks" }}
+ssl.keystore.location=/tmp/client.keystore.jks
+        {{- if not (empty $tlsPassword) }}
+ssl.keystore.password={{ $tlsPassword }}
+        {{- end }}
+    {{- else if eq .Values.auth.tls.type "pem" }}
+ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \
+... \
+-----END ENCRYPTED PRIVATE KEY-----
+    {{- end }}
+{{- end }}
+{{- if eq $tlsEndpointIdentificationAlgorithm "" }}
+ssl.endpoint.identification.algorithm=
+{{- end }}
+
+{{- end }}
+
+To create a pod that you can use as a Kafka client, run the following commands:
+
+    kubectl run {{ $fullname }}-client --restart='Never' --image {{ template "kafka.image" . }} --namespace {{ $releaseNamespace }} --command -- sleep infinity
+    {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}
+    kubectl cp --namespace {{ $releaseNamespace }} /path/to/client.properties {{ $fullname }}-client:/tmp/client.properties
+    {{- end }}
+    {{- if (include "kafka.client.saslAuthentication" .) }}
+    kubectl cp --namespace {{ $releaseNamespace }} /path/to/kafka_jaas.conf {{ $fullname }}-client:/tmp/kafka_jaas.conf
+    {{- end }}
+    {{- if and (include "kafka.client.tlsEncryption" .) (eq .Values.auth.tls.type "jks") }}
+    kubectl cp --namespace {{ $releaseNamespace }} ./kafka.truststore.jks {{ $fullname }}-client:/tmp/kafka.truststore.jks
+    {{- if eq .Values.auth.clientProtocol "mtls" }}
+    kubectl cp --namespace {{ $releaseNamespace }} ./client.keystore.jks {{ $fullname }}-client:/tmp/client.keystore.jks
+    {{- end }}
+    {{- end }}
+    kubectl exec --tty -i {{ $fullname }}-client --namespace {{ $releaseNamespace }} -- bash
+    {{- if (include "kafka.client.saslAuthentication" .) }}
+    export KAFKA_OPTS="-Djava.security.auth.login.config=/tmp/kafka_jaas.conf"
+    {{- end }}
+
+    PRODUCER:
+        kafka-console-producer.sh \
+            {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}
+            --producer.config /tmp/client.properties \
+            {{- end }}
+            --broker-list {{ join "," $brokerList }} \
+            --topic test
+
+    CONSUMER:
+        kafka-console-consumer.sh \
+            {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}
+            --consumer.config /tmp/client.properties \
+            {{- end }}
+            --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \
+            --topic test \
+            --from-beginning
+
+{{- if .Values.externalAccess.enabled }}
+
+To connect to your Kafka server from outside the cluster, follow the instructions below:
+
+{{- if eq "NodePort" .Values.externalAccess.service.type }}
+{{- if .Values.externalAccess.service.domain }}
+
+    Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }}
+
+{{- else }}
+
+    Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener)
+
+        1. Obtain the pod name:
+
+        kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka"
+
+        2. Obtain pod configuration:
+
+        kubectl exec -it KAFKA_POD -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners
+
+{{- end }}
+
+    Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below:
+
+        echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')"
+
+{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IPs to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w'
+
+    Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below:
+
+        echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')"
+
+    Kafka Brokers port: {{ .Values.externalAccess.service.ports.external }}
+
+{{- else if eq "ClusterIP" .Values.externalAccess.service.type }}
+
+    Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }}
+
+    Kafka brokers port: You will have a different port for each Kafka broker starting at {{ .Values.externalAccess.service.ports.external }}
+
+{{- end }}
+
+{{- if not (eq $clientProtocol $externalClientProtocol) }}
+{{- if (include "kafka.client.saslAuthentication" .) }}
+
+You need to configure your Kafka client to connect using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below:
+
+    - kafka_jaas.conf:
+
+KafkaClient {
+{{- if $saslMechanisms | regexFind "scram" }}
+org.apache.kafka.common.security.scram.ScramLoginModule required
+{{- else }}
+org.apache.kafka.common.security.plain.PlainLoginModule required
+{{- end }}
+username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}"
+password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)";
+};
+
+    - client.properties:
+
+security.protocol={{ $externalClientProtocol }}
+{{- if $saslMechanisms | regexFind "scram-sha-256" }}
+sasl.mechanism=SCRAM-SHA-256
+{{- else if $saslMechanisms | regexFind "scram-sha-512" }}
+sasl.mechanism=SCRAM-SHA-512
+{{- else }}
+sasl.mechanism=PLAIN
+{{- end }}
+{{- if eq $externalClientProtocol "SASL_SSL" }}
+ssl.truststore.type={{ upper .Values.auth.tls.type }}
+    {{- if eq .Values.auth.tls.type "jks" }}
+ssl.truststore.location=/tmp/kafka.truststore.jks
+        {{- if not (empty $tlsPassword) }}
+ssl.truststore.password={{ $tlsPassword }}
+        {{- end }}
+    {{- else if eq .Values.auth.tls.type "pem" }}
+ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+    {{- end }}
+    {{- if eq $tlsEndpointIdentificationAlgorithm "" }}
+ssl.endpoint.identification.algorithm=
+    {{- end }}
+{{- end }}
+
+{{- else if (include "kafka.externalClient.tlsEncryption" .) }}
+
+You need to configure your Kafka client to connect using TLS authentication. To do so, you need to create the 'client.properties' configuration file with the content below:
+
+security.protocol={{ $externalClientProtocol }}
+ssl.truststore.type={{ upper .Values.auth.tls.type }}
+{{- if eq .Values.auth.tls.type "jks" }}
+ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }}
+    {{- if not (empty $tlsPassword) }}
+ssl.truststore.password={{ $tlsPassword }}
+    {{- end }}
+{{- else if eq .Values.auth.tls.type "pem" }}
+ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+{{- end }}
+{{- if eq .Values.auth.externalClientProtocol "mtls" }}
+ssl.keystore.type={{ upper .Values.auth.tls.type }}
+    {{- if eq .Values.auth.tls.type "jks" }}
+ssl.keystore.location=/tmp/client.keystore.jks
+        {{- if not (empty $tlsPassword) }}
+ssl.keystore.password={{ $tlsPassword }}
+        {{- end }}
+    {{- else if eq .Values.auth.tls.type "pem" }}
+ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \
+... \
+-----END CERTIFICATE-----
+ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \
+... \
+-----END ENCRYPTED PRIVATE KEY-----
+    {{- end }}
+{{- end }}
+{{- if eq $tlsEndpointIdentificationAlgorithm "" }}
+ssl.endpoint.identification.algorithm=
+{{- end }}
+
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- include "kafka.checkRollingTags" . }}
+{{- include "kafka.validateValues" . }}

+ 555 - 0
kafka/helm/kafka/templates/_helpers.tpl

@@ -0,0 +1,555 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kafka.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified zookeeper name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "kafka.zookeeper.fullname" -}}
+{{- if .Values.zookeeper.fullnameOverride -}}
+{{- .Values.zookeeper.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use
+ */}}
+{{- define "kafka.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Kafka image name
+*/}}
+{{- define "kafka.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container auto-discovery image)
+*/}}
+{{- define "kafka.externalAccess.autoDiscovery.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "kafka.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Create a default fully qualified Kafka exporter name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "kafka.metrics.kafka.fullname" -}}
+  {{- printf "%s-exporter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use for Kafka exporter pods
+ */}}
+{{- define "kafka.metrics.kafka.serviceAccountName" -}}
+{{- if .Values.metrics.kafka.serviceAccount.create -}}
+    {{ default (include "kafka.metrics.kafka.fullname" .) .Values.metrics.kafka.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.metrics.kafka.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Kafka exporter image name
+*/}}
+{{- define "kafka.metrics.kafka.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.kafka.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper JMX exporter image name
+*/}}
+{{- define "kafka.metrics.jmx.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.jmx.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "kafka.imagePullSecrets" -}}
+{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.externalAccess.autoDiscovery.image .Values.volumePermissions.image .Values.metrics.kafka.image .Values.metrics.jmx.image) "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Storage Class
+*/}}
+{{- define "kafka.storageClass" -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
+*/}}
+{{- if .Values.global -}}
+    {{- if .Values.global.storageClass -}}
+        {{- if (eq "-" .Values.global.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.global.storageClass -}}
+        {{- end -}}
+    {{- else -}}
+        {{- if .Values.persistence.storageClass -}}
+              {{- if (eq "-" .Values.persistence.storageClass) -}}
+                  {{- printf "storageClassName: \"\"" -}}
+              {{- else }}
+                  {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+              {{- end -}}
+        {{- end -}}
+    {{- end -}}
+{{- else -}}
+    {{- if .Values.persistence.storageClass -}}
+        {{- if (eq "-" .Values.persistence.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+        {{- end -}}
+    {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if authentication via SASL should be configured for client communications
+*/}}
+{{- define "kafka.client.saslAuthentication" -}}
+{{- $saslProtocols := list "sasl" "sasl_tls" -}}
+{{- if has .Values.auth.clientProtocol $saslProtocols -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if authentication via SASL should be configured for inter-broker communications
+*/}}
+{{- define "kafka.interBroker.saslAuthentication" -}}
+{{- $saslProtocols := list "sasl" "sasl_tls" -}}
+{{- if has .Values.auth.interBrokerProtocol $saslProtocols -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if encryption via TLS for client connections should be configured
+*/}}
+{{- define "kafka.client.tlsEncryption" -}}
+{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}}
+{{- if (has .Values.auth.clientProtocol $tlsProtocols) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the configured value for the external client protocol, defaulting to the same value as clientProtocol
+*/}}
+{{- define "kafka.externalClientProtocol" -}}
+    {{- coalesce .Values.auth.externalClientProtocol .Values.auth.clientProtocol -}}
+{{- end -}}
+
+{{/*
+Return true if encryption via TLS for external client connections should be configured
+*/}}
+{{- define "kafka.externalClient.tlsEncryption" -}}
+{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}}
+{{- if (has (include "kafka.externalClientProtocol" . ) $tlsProtocols) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if encryption via TLS for inter-broker communication should be configured
+*/}}
+{{- define "kafka.interBroker.tlsEncryption" -}}
+{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}}
+{{- if (has .Values.auth.interBrokerProtocol $tlsProtocols) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if encryption via TLS should be configured
+*/}}
+{{- define "kafka.tlsEncryption" -}}
+{{- if or (include "kafka.client.tlsEncryption" .) (include "kafka.interBroker.tlsEncryption" .) (include "kafka.externalClient.tlsEncryption" .) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the type of listener
+Usage:
+{{ include "kafka.listenerType" ( dict "protocol" .Values.path.to.the.Value ) }}
+*/}}
+{{- define "kafka.listenerType" -}}
+{{- if eq .protocol "plaintext" -}}
+PLAINTEXT
+{{- else if or (eq .protocol "tls") (eq .protocol "mtls") -}}
+SSL
+{{- else if eq .protocol "sasl_tls" -}}
+SASL_SSL
+{{- else if eq .protocol "sasl" -}}
+SASL_PLAINTEXT
+{{- end -}}
+{{- end -}}
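
For reference, a quick sketch of how this helper maps the chart's protocol values onto Kafka listener security protocols (invocations mirror the usage note above):

```yaml
# {{ include "kafka.listenerType" (dict "protocol" "plaintext") }}  -> PLAINTEXT
# {{ include "kafka.listenerType" (dict "protocol" "tls") }}        -> SSL
# {{ include "kafka.listenerType" (dict "protocol" "mtls") }}       -> SSL
# {{ include "kafka.listenerType" (dict "protocol" "sasl") }}       -> SASL_PLAINTEXT
# {{ include "kafka.listenerType" (dict "protocol" "sasl_tls") }}   -> SASL_SSL
```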
+
+{{/*
+Return the protocol used with zookeeper
+*/}}
+{{- define "kafka.zookeeper.protocol" -}}
+{{- if and .Values.auth.zookeeper.tls.enabled .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser -}}
+SASL_SSL
+{{- else if and .Values.auth.zookeeper.tls.enabled -}}
+SSL
+{{- else if and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser -}}
+SASL
+{{- else -}}
+PLAINTEXT
+{{- end -}}
+{{- end -}}
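
The branches above boil down to a small truth table; a sketch (TLS comes from auth.zookeeper.tls.enabled, SASL from zookeeper.auth.client.enabled plus a non-empty auth.sasl.jaas.zookeeperUser):

```yaml
# TLS enabled | SASL enabled | resulting protocol
#    true     |    true      | SASL_SSL
#    true     |    false     | SSL
#    false    |    true      | SASL
#    false    |    false     | PLAINTEXT
```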
+
+{{/*
+Return the Kafka JAAS credentials secret
+*/}}
+{{- define "kafka.jaasSecretName" -}}
+{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-jaas" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a JAAS credentials secret object should be created
+*/}}
+{{- define "kafka.createJaasSecret" -}}
+{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}}
+{{- if and (or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) (and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser)) (empty $secretName) -}}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "kafka.createTlsSecret" -}}
+{{- if and (include "kafka.tlsEncryption" .) (empty .Values.auth.tls.existingSecrets) (eq .Values.auth.tls.type "pem") .Values.auth.tls.autoGenerated }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka configuration configmap
+*/}}
+{{- define "kafka.configmapName" -}}
+{{- if .Values.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s-configuration" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the secret name for the Kafka Provisioning client
+*/}}
+{{- define "kafka.client.passwordsSecretName" -}}
+{{- if .Values.provisioning.auth.tls.passwordsSecret -}}
+    {{- printf "%s" (tpl .Values.provisioning.auth.tls.passwordsSecret $) -}}
+{{- else -}}
+    {{- printf "%s-client-secret" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use for the Kafka Provisioning client
+*/}}
+{{- define "kafka.provisioning.serviceAccountName" -}}
+{{- if .Values.provisioning.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.provisioning.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.provisioning.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created
+*/}}
+{{- define "kafka.createConfigmap" -}}
+{{- if and .Values.config (not .Values.existingConfigmap) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the Kafka log4j ConfigMap name.
+*/}}
+{{- define "kafka.log4j.configMapName" -}}
+{{- if .Values.existingLog4jConfigMap -}}
+    {{- printf "%s" (tpl .Values.existingLog4jConfigMap $) -}}
+{{- else -}}
+    {{- printf "%s-log4j-configuration" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a log4j ConfigMap object should be created.
+*/}}
+{{- define "kafka.log4j.createConfigMap" -}}
+{{- if and .Values.log4j (not .Values.existingLog4jConfigMap) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the SASL mechanism to use for the Kafka exporter to access Kafka
+The exporter uses a different nomenclature, so we map the chart's mechanism names to the exporter's here
+*/}}
+{{- define "kafka.metrics.kafka.saslMechanism" -}}
+{{- $saslMechanisms := .Values.auth.sasl.mechanisms }}
+{{- if contains "scram-sha-512" $saslMechanisms }}
+    {{- print "scram-sha512" -}}
+{{- else if contains "scram-sha-256" $saslMechanisms }}
+    {{- print "scram-sha256" -}}
+{{- else -}}
+    {{- print "plain" -}}
+{{- end -}}
+{{- end -}}
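
Concretely, the renaming performed above (left: the chart's auth.sasl.mechanisms value; right: what ends up in kafka_exporter's --sasl.mechanism flag):

```yaml
# "scram-sha-512"             -> scram-sha512
# "scram-sha-256"             -> scram-sha256
# anything else, e.g. "plain" -> plain
```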
+
+{{/*
+Return the Kafka JMX metrics configuration ConfigMap
+*/}}
+{{- define "kafka.metrics.jmx.configmapName" -}}
+{{- if .Values.metrics.jmx.existingConfigmap -}}
+    {{- printf "%s" (tpl .Values.metrics.jmx.existingConfigmap $) -}}
+{{- else -}}
+    {{- printf "%s-jmx-configuration" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created
+*/}}
+{{- define "kafka.metrics.jmx.createConfigmap" -}}
+{{- if and .Values.metrics.jmx.enabled .Values.metrics.jmx.config (not .Values.metrics.jmx.existingConfigmap) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Check if there are rolling tags in the images
+*/}}
+{{- define "kafka.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }}
+{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }}
+{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "kafka.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "kafka.validateValues.authProtocols" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.nodePortListLength" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalIPListLength" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.domainSpecified" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceType" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryRBAC" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerIPs" "context" .)) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerNames" "context" .)) -}}
+{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerAnnotations" "context" . )) -}}
+{{- $messages := append $messages (include "kafka.validateValues.saslMechanisms" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets.length" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.tlsPasswords" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.kraftMode" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.ClusterIdDefinedIfKraft" .) -}}
+{{- $messages := append $messages (include "kafka.validateValues.controllerQuorumVotersDefinedIfKraft" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{-   printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
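
When any of these checks trips, rendering aborts; the failure from helm install/template looks roughly like the following (illustrative output, assuming an invalid auth.clientProtocol; the exact template path varies):

```text
Error: execution error at (kafka/templates/…):
VALUES VALIDATION:
kafka: auth.clientProtocol auth.externalClientProtocol auth.interBrokerProtocol
    Available authentication protocols are "plaintext", "tls", "mtls", "sasl" and "sasl_tls"
```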
+
+{{/* Validate values of Kafka - Authentication protocols for Kafka */}}
+{{- define "kafka.validateValues.authProtocols" -}}
+{{- $authProtocols := list "plaintext" "tls" "mtls" "sasl" "sasl_tls" -}}
+{{- if or (not (has .Values.auth.clientProtocol $authProtocols)) (not (has .Values.auth.interBrokerProtocol $authProtocols)) (not (has (include "kafka.externalClientProtocol" . ) $authProtocols)) -}}
+kafka: auth.clientProtocol auth.externalClientProtocol auth.interBrokerProtocol
+    Available authentication protocols are "plaintext", "tls", "mtls", "sasl" and "sasl_tls"
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of replicas must be the same as NodePort list */}}
+{{- define "kafka.validateValues.nodePortListLength" -}}
+{{- $replicaCount := int .Values.replicaCount -}}
+{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts -}}
+{{- $nodePortListIsEmpty := empty .Values.externalAccess.service.nodePorts -}}
+{{- $nodePortListLengthEqualsReplicaCount := eq $nodePortListLength $replicaCount -}}
+{{- $externalIPListIsEmpty := empty .Values.externalAccess.service.externalIPs -}}
+{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (eq .Values.externalAccess.service.type "NodePort") (or (and (not $nodePortListIsEmpty) (not $nodePortListLengthEqualsReplicaCount)) (and $nodePortListIsEmpty $externalIPListIsEmpty)) -}}
+kafka: .Values.externalAccess.service.nodePorts
+    Number of replicas and nodePort array length must be the same, or externalIPs must be provided instead. Currently: replicaCount = {{ $replicaCount }} and length nodePorts = {{ $nodePortListLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of replicas must be the same as externalIPs list */}}
+{{- define "kafka.validateValues.externalIPListLength" -}}
+{{- $replicaCount := int .Values.replicaCount -}}
+{{- $externalIPListLength := len .Values.externalAccess.service.externalIPs -}}
+{{- $externalIPListIsEmpty := empty .Values.externalAccess.service.externalIPs -}}
+{{- $externalIPListEqualsReplicaCount := eq $externalIPListLength $replicaCount -}}
+{{- $nodePortListIsEmpty := empty .Values.externalAccess.service.nodePorts -}}
+{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (eq .Values.externalAccess.service.type "NodePort") (or (and (not $externalIPListIsEmpty) (not $externalIPListEqualsReplicaCount)) (and $externalIPListIsEmpty $nodePortListIsEmpty)) -}}
+kafka: .Values.externalAccess.service.externalIPs
+    Number of replicas and externalIPs array length must be the same. Currently: replicaCount = {{ $replicaCount }} and length externalIPs = {{ $externalIPListLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - domain must be defined if external service type ClusterIP */}}
+{{- define "kafka.validateValues.domainSpecified" -}}
+{{- if and (eq .Values.externalAccess.service.type "ClusterIP") (eq .Values.externalAccess.service.domain "") -}}
+kafka: .Values.externalAccess.service.domain
+    Domain must be specified if service type ClusterIP is set for external service
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - service type for external access */}}
+{{- define "kafka.validateValues.externalAccessServiceType" -}}
+{{- if and (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) (not (eq .Values.externalAccess.service.type "ClusterIP")) -}}
+kafka: externalAccess.service.type
+    Available service types for external access are NodePort, LoadBalancer and ClusterIP.
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - RBAC should be enabled when autoDiscovery is enabled */}}
+{{- define "kafka.validateValues.externalAccessAutoDiscoveryRBAC" -}}
+{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create ) }}
+kafka: rbac.create
+    By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true"
+    an initContainer will be used to auto-detect the external IPs/ports by querying the
+    K8s API. Please note this initContainer requires specific RBAC resources. You can create them
+    by specifying "--set rbac.create=true".
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - LoadBalancerIPs or LoadBalancerNames should be set when autoDiscovery is disabled */}}
+{{- define "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" -}}
+{{- $loadBalancerNameListLength := len .Values.externalAccess.service.loadBalancerNames -}}
+{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}}
+{{- if and .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "LoadBalancer") (not .Values.externalAccess.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }}
+kafka: externalAccess.service.loadBalancerNames or externalAccess.service.loadBalancerIPs
+    By specifying "externalAccess.enabled=true", "externalAccess.autoDiscovery.enabled=false" and
+    "externalAccess.service.type=LoadBalancer" at least one of externalAccess.service.loadBalancerNames
+    or externalAccess.service.loadBalancerIPs must be set, and the length of those arrays must be equal
+    to the number of replicas.
+{{- end -}}
+{{- end -}}
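
A value set that satisfies this check for a three-broker cluster might look like the following sketch (the IPs are documentation placeholders):

```yaml
replicaCount: 3
externalAccess:
  enabled: true
  autoDiscovery:
    enabled: false
  service:
    type: LoadBalancer
    loadBalancerIPs:       # length must equal replicaCount
      - 203.0.113.10
      - 203.0.113.11
      - 203.0.113.12
```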
+
+{{/* Validate values of Kafka - number of replicas must be the same as the length of the given externalAccess.service list */}}
+{{- define "kafka.validateValues.externalAccessServiceList" -}}
+{{- $replicaCount := int .context.Values.replicaCount }}
+{{- $listLength := len (get .context.Values.externalAccess.service .element) -}}
+{{- if and .context.Values.externalAccess.enabled (not .context.Values.externalAccess.autoDiscovery.enabled) (eq .context.Values.externalAccess.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }}
+kafka: externalAccess.service.{{ .element }}
+    Number of replicas and {{ .element }} array length must be the same. Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - SASL mechanisms must be provided when using SASL */}}
+{{- define "kafka.validateValues.saslMechanisms" -}}
+{{- if and (or (.Values.auth.clientProtocol | regexFind "sasl") (.Values.auth.interBrokerProtocol | regexFind "sasl") (and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser)) (not .Values.auth.sasl.mechanisms) }}
+kafka: auth.sasl.mechanisms
+    The SASL mechanisms are required when either auth.clientProtocol or auth.interBrokerProtocol uses SASL, or when a Zookeeper user is provided.
+{{- end }}
+{{- if not (contains .Values.auth.sasl.interBrokerMechanism .Values.auth.sasl.mechanisms) }}
+kafka: auth.sasl.mechanisms
+    auth.sasl.interBrokerMechanism must be provided, and it must be one of the mechanisms specified in auth.sasl.mechanisms
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - Secrets containing TLS certs must be provided when TLS authentication is enabled */}}
+{{- define "kafka.validateValues.tlsSecrets" -}}
+{{- if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "jks") (empty .Values.auth.tls.existingSecrets) }}
+kafka: auth.tls.existingSecrets
+    A secret containing the Kafka JKS keystores and truststore is required
+    when TLS encryption is enabled and the TLS format is "JKS"
+{{- else if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "pem") (empty .Values.auth.tls.existingSecrets) (not .Values.auth.tls.autoGenerated) }}
+kafka: auth.tls.existingSecrets
+    A secret containing the Kafka TLS certificates and keys is required
+    when TLS encryption is enabled and the TLS format is "PEM"
+{{- end -}}
+{{- end -}}
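
For the "jks" case, the expected shape of the values is one secret per broker (secret names below are invented); the next helper additionally enforces that the list length matches replicaCount:

```yaml
auth:
  tls:
    type: jks
    existingSecrets:       # one entry per replica
      - kafka-0-tls
      - kafka-1-tls
      - kafka-2-tls
```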
+
+{{/* Validate values of Kafka - The number of secrets containing TLS certs should be equal to the number of replicas */}}
+{{- define "kafka.validateValues.tlsSecrets.length" -}}
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and (include "kafka.tlsEncryption" .) (not (empty .Values.auth.tls.existingSecrets)) }}
+{{- $existingSecretsLength := len .Values.auth.tls.existingSecrets }}
+{{- if ne $replicaCount $existingSecretsLength }}
+kafka: .Values.auth.tls.existingSecrets
+    Number of replicas and existingSecrets array length must be the same. Currently: replicaCount = {{ $replicaCount }} and existingSecrets = {{ $existingSecretsLength }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka provisioning - keyPasswordSecretKey, keystorePasswordSecretKey or truststorePasswordSecretKey must not be used without passwordsSecret */}}
+{{- define "kafka.validateValues.tlsPasswords" -}}
+{{- if and (include "kafka.client.tlsEncryption" .) .Values.provisioning.enabled (not .Values.provisioning.auth.tls.passwordsSecret) }}
+{{- if or .Values.provisioning.auth.tls.keyPasswordSecretKey .Values.provisioning.auth.tls.keystorePasswordSecretKey .Values.provisioning.auth.tls.truststorePasswordSecretKey }}
+kafka: provisioning.auth.tls.keyPasswordSecretKey,provisioning.auth.tls.keystorePasswordSecretKey,provisioning.auth.tls.truststorePasswordSecretKey
+    provisioning.auth.tls.keyPasswordSecretKey, provisioning.auth.tls.keystorePasswordSecretKey and
+    provisioning.auth.tls.truststorePasswordSecretKey must not be used unless provisioning.auth.tls.passwordsSecret is set.
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka Kraft mode. It cannot be used with zookeeper  */}}
+{{- define "kafka.validateValues.kraftMode" -}}
+{{- $externalZKlen := len .Values.externalZookeeper.servers}}
+{{- if and .Values.kraft.enabled (or .Values.zookeeper.enabled (gt $externalZKlen 0))  }}
+kafka: Kraft mode
+    You cannot use Kraft mode and Zookeeper at the same time; they are mutually exclusive. Disable Zookeeper via '.Values.zookeeper.enabled' and remove any values from '.Values.externalZookeeper.servers' if you want to use Kraft mode
+{{- end -}}
+{{- end -}}
+
+{{/* Validate ClusterId value. It must be defined if Kraft mode is used.  */}}
+{{- define "kafka.validateValues.ClusterIdDefinedIfKraft" -}}
+{{- if and .Values.kraft.enabled (not .Values.kraft.clusterId) (gt (int .Values.replicaCount) 1) }}
+kafka: Kraft mode
+    .Values.kraft.clusterId must not be empty if .Values.kraft.enabled is set to true and .Values.replicaCount > 1.
+{{- end -}}
+{{- end -}}
+
+{{/* Validate controllerQuorumVoters value. It must be defined if it is broker-only deployment.  */}}
+{{- define "kafka.validateValues.controllerQuorumVotersDefinedIfKraft" -}}
+{{- if and .Values.kraft.enabled (not .Values.kraft.controllerQuorumVoters) (not (contains "controller" .Values.kraft.processRoles)) }}
+kafka: Kraft mode
+    .Values.kraft.controllerQuorumVoters must not be empty if .Values.kraft.enabled is set to true and .Values.kraft.processRoles does not contain "controller".
+    If you deploy brokers without controllers, you must define the external controllers via .Values.kraft.controllerQuorumVoters
+{{- end -}}
+{{- end -}}
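
Taken together, a minimal KRaft-mode value set that passes all three checks could look like this sketch (the clusterId is a placeholder):

```yaml
kraft:
  enabled: true
  processRoles: broker,controller      # contains "controller", so controllerQuorumVoters may stay empty
  clusterId: "abcdefghijklmnopqrstuv"  # required because replicaCount > 1
replicaCount: 3
zookeeper:
  enabled: false                       # KRaft and Zookeeper are mutually exclusive
externalZookeeper:
  servers: []
```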

+ 17 - 0
kafka/helm/kafka/templates/configmap.yaml

@@ -0,0 +1,17 @@
+{{- if (include "kafka.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-configuration" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  server.properties: |-
+    {{ .Values.config | nindent 4 }}
+{{- end -}}
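
The config value consumed here is a plain multi-line string of server.properties entries; a hedged example:

```yaml
config: |-
  num.network.threads=3
  num.io.threads=8
  log.retention.hours=168
```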

+ 4 - 0
kafka/helm/kafka/templates/extra-list.yaml

@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
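
extraDeploy lets users ship arbitrary extra manifests through the chart's own templating; a minimal sketch of a values entry it would render:

```yaml
extraDeploy:
  - apiVersion: v1
    kind: ConfigMap
    metadata:
      name: '{{ include "common.names.fullname" . }}-extra'  # tpl-expanded at render time
    data:
      note: "rendered alongside the chart's own templates"
```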

+ 95 - 0
kafka/helm/kafka/templates/jaas-secret.yaml

@@ -0,0 +1,95 @@
+
+{{- $port := print .Values.service.ports.client }}
+{{- $host := list }}
+{{- $bootstrapServers := list }}
+{{- range $i, $e := until (int .Values.replicaCount) }}
+  {{- $broker := printf "%s-%s.%s-headless.%s.svc.%s" (include "common.names.fullname" $) (print $i) (include "common.names.fullname" $) $.Release.Namespace $.Values.clusterDomain }}
+  {{- $host = append $host $broker }}
+  {{- $bootstrapServers = append $bootstrapServers (printf "%s:%s" $broker $port) }}
+{{- end }}
+{{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }}
+{{- $clientPasswords := .Values.auth.sasl.jaas.clientPasswords }}
+{{- if not $clientPasswords }}
+  {{- $clientPasswords = list }}
+{{- range $clientUsers }}
+  {{- $clientPasswords = append $clientPasswords (randAlphaNum 10) }}
+{{- end }}
+{{- end }}
+{{- if (include "kafka.createJaasSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-jaas" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  {{- if (include "kafka.client.saslAuthentication" .) }}
+  client-passwords: {{ join "," $clientPasswords | b64enc | quote }}
+  system-user-password: {{ index $clientPasswords 0 | b64enc | quote }}
+  {{- end }}
+  {{- $zookeeperUser := .Values.auth.sasl.jaas.zookeeperUser }}
+  {{- if and .Values.zookeeper.auth.client.enabled $zookeeperUser }}
+  {{- $zookeeperPassword := .Values.auth.sasl.jaas.zookeeperPassword }}
+  zookeeper-password: {{ default (randAlphaNum 10) $zookeeperPassword | b64enc | quote }}
+  {{- end }}
+  {{- if (include "kafka.interBroker.saslAuthentication" .) }}
+  {{- $interBrokerPassword := .Values.auth.sasl.jaas.interBrokerPassword }}
+  inter-broker-password: {{ default (randAlphaNum 10) $interBrokerPassword | b64enc | quote }}
+  {{- end }}
+{{- end }}
+{{- if .Values.serviceBindings.enabled }}
+{{- if (include "kafka.client.saslAuthentication" .) }}
+{{- range $i, $e := until (len $clientUsers) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" $ }}-svcbind-user-{{ $i }}
+  namespace: {{ $.Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" $ | nindent 4 }}
+    {{- if $.Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if $.Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: servicebinding.io/kafka
+data:
+  provider: {{ print "bitnami" | b64enc | quote }}
+  type: {{ print "kafka" | b64enc | quote }}
+  username: {{ index $clientUsers $i | b64enc | quote }}
+  password: {{ index $clientPasswords $i | b64enc | quote }}
+  host: {{ join "," $host | b64enc | quote }}
+  port: {{ print $port | b64enc | quote }}
+  bootstrap-servers: {{ join "," $bootstrapServers | b64enc | quote }}
+{{- end }}
+{{- else }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}-svcbind
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: servicebinding.io/kafka
+data:
+  provider: {{ print "bitnami" | b64enc | quote }}
+  type: {{ print "kafka" | b64enc | quote }}
+  host: {{ join "," $host | b64enc | quote }}
+  port: {{ print $port | b64enc | quote }}
+  bootstrap-servers: {{ join "," $bootstrapServers | b64enc | quote }}
+{{- end }}
+{{- end }}
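
Because the client passwords fall back to randAlphaNum above when none are supplied, they usually have to be read back out of the rendered secret; assuming a release named `kafka` (hypothetical) and the key names from the template:

```bash
# comma-separated list, one password per entry in auth.sasl.jaas.clientUsers
kubectl get secret kafka-jaas -o jsonpath='{.data.client-passwords}' | base64 -d
```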

+ 68 - 0
kafka/helm/kafka/templates/jmx-configmap.yaml

@@ -0,0 +1,68 @@
+{{- if (include "kafka.metrics.jmx.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  jmx-kafka-prometheus.yml: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.config "context" $ ) | nindent 4 }}
+    rules:
+      - pattern: kafka.controller<type=(ControllerChannelManager), name=(QueueSize), broker-id=(\d+)><>(Value)
+        name: kafka_controller_$1_$2_$4
+        labels:
+          broker_id: "$3"
+      - pattern: kafka.controller<type=(ControllerChannelManager), name=(TotalQueueSize)><>(Value)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.controller<type=(KafkaController), name=(.+)><>(Value)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.controller<type=(ControllerStats), name=(.+)><>(Count)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.network<type=(Processor), name=(IdlePercent), networkProcessor=(.+)><>(Value)
+        name: kafka_network_$1_$2_$4
+        labels:
+          network_processor: $3
+      - pattern: kafka.network<type=(RequestMetrics), name=(.+), request=(.+)><>(Count|Value)
+        name: kafka_network_$1_$2_$4
+        labels:
+          request: $3
+      - pattern: kafka.network<type=(SocketServer), name=(.+)><>(Count|Value)
+        name: kafka_network_$1_$2_$3
+      - pattern: kafka.network<type=(RequestChannel), name=(.+)><>(Count|Value)
+        name: kafka_network_$1_$2_$3
+      - pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate)
+        name: kafka_server_$1_$2_$4
+        labels:
+          topic: $3
+      - pattern: kafka.server<type=(ReplicaFetcherManager), name=(.+), clientId=(.+)><>(Value)
+        name: kafka_server_$1_$2_$4
+        labels:
+          client_id: "$3"
+      - pattern: kafka.server<type=(DelayedOperationPurgatory), name=(.+), delayedOperation=(.+)><>(Value)
+        name: kafka_server_$1_$2_$3_$4
+      - pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate)
+        name: kafka_server_$1_total_$2_$3
+      - pattern: kafka.server<type=(.+)><>(queue-size)
+        name: kafka_server_$1_$2
+      - pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+)
+        name: java_lang_$1_$4_$3_$2
+      - pattern: java.lang<type=(.+), name=(.+)><>(\w+)
+        name: java_lang_$1_$3_$2
+      - pattern: java.lang<type=(.*)>
+      - pattern: kafka.log<type=(.+), name=(.+), topic=(.+), partition=(.+)><>Value
+        name: kafka_log_$1_$2
+        labels:
+          topic: $3
+          partition: $4
+      {{- if .Values.metrics.jmx.extraRules }}
+      {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.extraRules "context" $ ) | nindent 6 }}
+      {{- end }}
+{{- end -}}
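
metrics.jmx.extraRules entries are appended verbatim after the built-in patterns above; one hypothetical extra rule:

```yaml
metrics:
  jmx:
    extraRules:
      - pattern: kafka.server<type=(.+), name=(.+), listener=(.+)><>(Count)
        name: kafka_server_$1_$2_$4
        labels:
          listener: $3
```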

+ 34 - 0
kafka/helm/kafka/templates/jmx-metrics-svc.yaml

@@ -0,0 +1,34 @@
+{{- if .Values.metrics.jmx.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.metrics.jmx.service.annotations }}
+    {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }}
+  {{- if .Values.metrics.jmx.service.clusterIP }}
+  clusterIP: {{ .Values.metrics.jmx.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: http-metrics
+      port: {{ .Values.metrics.jmx.service.ports.metrics }}
+      protocol: TCP
+      targetPort: metrics
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: kafka
+{{- end }}

+ 171 - 0
kafka/helm/kafka/templates/kafka-metrics-deployment.yaml

@@ -0,0 +1,171 @@
+{{- if .Values.metrics.kafka.enabled }}
+{{- $replicaCount := int .Values.replicaCount -}}
+{{- $releaseNamespace := .Release.Namespace -}}
+{{- $clusterDomain := .Values.clusterDomain -}}
+{{- $fullname := include "common.names.fullname" . -}}
+{{- $servicePort := int .Values.service.ports.client -}}
+apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "kafka.metrics.kafka.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: cluster-metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: cluster-metrics
+  template:
+    metadata:
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: cluster-metrics
+        {{- if .Values.metrics.kafka.podLabels }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podLabels "context" $) | nindent 8 }}
+        {{- end }}
+      annotations:
+        {{- if .Values.metrics.kafka.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- include "kafka.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.metrics.kafka.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.metrics.kafka.affinity }}
+      affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.metrics.kafka.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.metrics.kafka.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.tolerations "context" .) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.metrics.kafka.topologySpreadConstraints }}
+      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.topologySpreadConstraints "context" .) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.metrics.kafka.priorityClassName }}
+      priorityClassName: {{ .Values.metrics.kafka.priorityClassName }}
+      {{- end }}
+      {{- if .Values.metrics.kafka.schedulerName }}
+      schedulerName: {{ .Values.metrics.kafka.schedulerName }}
+      {{- end }}
+      {{- if .Values.metrics.kafka.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.metrics.kafka.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "kafka.metrics.kafka.serviceAccountName" . }}
+      {{- if .Values.metrics.kafka.initContainers }}
+      initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.initContainers "context" $) | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: kafka-exporter
+          image: {{ include "kafka.metrics.kafka.image" . }}
+          imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }}
+          {{- if .Values.metrics.kafka.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.metrics.kafka.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.metrics.kafka.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.command "context" $) | nindent 12 }}
+          {{- else }}
+          command:
+            - bash
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.metrics.kafka.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.args "context" $) | nindent 12 }}
+          {{- else }}
+          args:
+            - -ce
+            - |
+              kafka_exporter \
+              {{- range $i, $e := until $replicaCount }}
+              --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \
+              {{- end }}
+              {{- if (include "kafka.client.saslAuthentication" .) }}
+              --sasl.enabled \
+              --sasl.username=$SASL_USERNAME \
+              --sasl.password=$SASL_USER_PASSWORD \
+              --sasl.mechanism={{ include "kafka.metrics.kafka.saslMechanism" . }} \
+              {{- end }}
+              {{- if (include "kafka.client.tlsEncryption" .) }}
+              --tls.enabled \
+              {{- if .Values.metrics.kafka.certificatesSecret }}
+              --tls.key-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }} \
+              --tls.cert-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }} \
+              {{- if .Values.metrics.kafka.tlsCaSecret }}
+              --tls.ca-file=/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }} \
+              {{- else }}
+              --tls.ca-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }} \
+              {{- end }}
+              {{- end }}
+              {{- end }}
+              {{- range $key, $value := .Values.metrics.kafka.extraFlags }}
+              --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \
+              {{- end }}
+              --web.listen-address=:{{ .Values.metrics.kafka.containerPorts.metrics }}
+          {{- end }}
+          {{- if (include "kafka.client.saslAuthentication" .) }}
+          {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }}
+          env:
+            - name: SASL_USERNAME
+              value: {{ index $clientUsers 0 | quote }}
+            - name: SASL_USER_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "kafka.jaasSecretName" . }}
+                  key: system-user-password
+          {{- end }}
+          ports:
+            - name: metrics
+              containerPort: {{ .Values.metrics.kafka.containerPorts.metrics }}
+          {{- if .Values.metrics.kafka.resources }}
+          resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.metrics.kafka.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumeMounts "context" $) | nindent 12 }}
+            {{- end }}
+            {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }}
+            - name: kafka-exporter-certificates
+              mountPath: /opt/bitnami/kafka-exporter/certs/
+              readOnly: true
+            {{- if .Values.metrics.kafka.tlsCaSecret }}
+            - name: kafka-exporter-ca-certificate
+              mountPath: /opt/bitnami/kafka-exporter/cacert/
+              readOnly: true
+            {{- end }}
+            {{- end }}
+        {{- if .Values.metrics.kafka.sidecars }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.sidecars "context" $) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if .Values.metrics.kafka.extraVolumes }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumes "context" $) | nindent 8 }}
+        {{- end }}
+        {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }}
+        - name: kafka-exporter-certificates
+          secret:
+            secretName: {{ .Values.metrics.kafka.certificatesSecret }}
+            defaultMode: 0440
+        {{- if .Values.metrics.kafka.tlsCaSecret }}
+        - name: kafka-exporter-ca-certificate
+          secret:
+            secretName: {{ .Values.metrics.kafka.tlsCaSecret }}
+            defaultMode: 0440
+        {{- end }}
+        {{- end }}
+{{- end }}
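
To exercise the TLS branch of the exporter command above, the values it reads might look like this sketch (secret and file names are placeholders; flag names under extraFlags are assumed to be valid kafka_exporter flags):

```yaml
metrics:
  kafka:
    enabled: true
    certificatesSecret: kafka-exporter-certs   # mounted at /opt/bitnami/kafka-exporter/certs/
    tlsCert: tls.crt
    tlsKey: tls.key
    tlsCaCert: ca.crt
    extraFlags:
      verbosity: "1"          # rendered as --verbosity=1
      log.enable-sarama: ""   # an empty value renders as a bare --log.enable-sarama
```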

+ 16 - 0
kafka/helm/kafka/templates/kafka-metrics-serviceaccount.yaml

@@ -0,0 +1,16 @@
+{{- if and .Values.metrics.kafka.enabled .Values.metrics.kafka.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "kafka.metrics.kafka.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: cluster-metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.metrics.kafka.serviceAccount.automountServiceAccountToken }}
+{{- end }}

+ 34 - 0
kafka/helm/kafka/templates/kafka-metrics-svc.yaml

@@ -0,0 +1,34 @@
+{{- if .Values.metrics.kafka.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: cluster-metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.metrics.kafka.service.annotations }}
+    {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  sessionAffinity: {{ .Values.metrics.kafka.service.sessionAffinity }}
+  {{- if .Values.metrics.kafka.service.clusterIP }}
+  clusterIP: {{ .Values.metrics.kafka.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: http-metrics
+      port: {{ .Values.metrics.kafka.service.ports.metrics }}
+      protocol: TCP
+      targetPort: metrics
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: cluster-metrics
+{{- end }}

+ 19 - 0
kafka/helm/kafka/templates/kafka-provisioning-secret.yaml

@@ -0,0 +1,19 @@
+{{- if and .Values.provisioning.enabled (include "kafka.client.tlsEncryption" .) (not .Values.provisioning.auth.tls.passwordsSecret)  }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "kafka.client.passwordsSecretName" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  truststore-password: {{ default "" .Values.provisioning.auth.tls.truststorePassword | b64enc | quote }}
+  keystore-password: {{ default "" .Values.provisioning.auth.tls.keystorePassword | b64enc | quote }}
+  key-password: {{ default "" .Values.provisioning.auth.tls.keyPassword | b64enc | quote }}
+{{- end }}

+ 15 - 0
kafka/helm/kafka/templates/kafka-provisioning-serviceaccount.yaml

@@ -0,0 +1,15 @@
+{{- if .Values.provisioning.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "kafka.provisioning.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.provisioning.serviceAccount.automountServiceAccountToken }}
+{{- end }}

+ 265 - 0
kafka/helm/kafka/templates/kafka-provisioning.yaml

@@ -0,0 +1,265 @@
+{{- if .Values.provisioning.enabled }}
+{{- $replicaCount := int .Values.replicaCount }}
+kind: Job
+apiVersion: batch/v1
+metadata:
+  name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: kafka-provisioning
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    helm.sh/hook: post-install,post-upgrade
+    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  template:
+    metadata:
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: kafka-provisioning
+        {{- if .Values.provisioning.podLabels }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podLabels "context" $) | nindent 8 }}
+        {{- end }}
+      annotations:
+        {{- if .Values.provisioning.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+    spec:
+      serviceAccountName: {{ template "kafka.provisioning.serviceAccountName" . }}
+      {{- include "kafka.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.provisioning.schedulerName }}
+      schedulerName: {{ .Values.provisioning.schedulerName | quote }}
+      {{- end }}
+      {{- if .Values.provisioning.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      restartPolicy: OnFailure
+      terminationGracePeriodSeconds: 0
+      {{- if .Values.provisioning.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.provisioning.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.tolerations "context" .) | nindent 8 }}
+      {{- end }}
+      {{- if or .Values.provisioning.initContainers .Values.provisioning.waitForKafka }}
+      initContainers:
+        {{- if .Values.provisioning.waitForKafka }}
+        - name: wait-for-available-kafka
+          image: {{ include "kafka.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.provisioning.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          command:
+            - /bin/bash
+          args:
+            - -ec
+            - |
+              wait-for-port \
+                --host={{ include "common.names.fullname" . }} \
+                --state=inuse \
+                --timeout=120 \
+                {{ .Values.service.ports.client | int64 }};
+              echo "Kafka is available";
+          {{- if .Values.provisioning.resources }}
+          resources: {{- toYaml .Values.provisioning.resources | nindent 12 }}
+          {{- end }}
+        {{- end }}
+        {{- if .Values.provisioning.initContainers }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }}
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: kafka-provisioning
+          image: {{ include "kafka.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.provisioning.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.provisioning.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }}
+          {{- else }}
+          command:
+            - /bin/bash
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.provisioning.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }}
+          {{- else }}
+          args:
+            - -ec
+            - |
+              echo "Configuring environment"
+              . /opt/bitnami/scripts/libkafka.sh
+              export CLIENT_CONF="${CLIENT_CONF:-/opt/bitnami/kafka/config/client.properties}"
+              if [ ! -f "$CLIENT_CONF" ]; then
+                touch $CLIENT_CONF
+
+                kafka_common_conf_set "$CLIENT_CONF" security.protocol {{ include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) | quote }}
+                {{- if (include "kafka.client.tlsEncryption" .) }}
+                kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }}
+                kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }}
+                ! is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD"
+                {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }}
+                {{- if .Values.provisioning.auth.tls.caCert }}
+                file_to_multiline_property() {
+                    awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}"
+                }
+                kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.key "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.key }}")"
+                kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")"
+                kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")"
+                {{- else }}
+                kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}"
+                kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}"
+                {{- end }}
+                {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }}
+                kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}"
+                kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}"
+                ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD"
+                ! is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD"
+                {{- end }}
+                {{- end }}
+                {{- if (include "kafka.client.saslAuthentication" .) }}
+                {{- if contains "plain" .Values.auth.sasl.mechanisms }}
+                kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism PLAIN
+                kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+                {{- else if contains "scram-sha-256" .Values.auth.sasl.mechanisms }}
+                kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-256
+                kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+                {{- else if contains "scram-sha-512" .Values.auth.sasl.mechanisms }}
+                kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-512
+                kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+                {{- end }}
+                {{- end }}
+              fi
+
+              echo "Running pre-provisioning script if any given"
+              {{ .Values.provisioning.preScript | nindent 14 }}
+
+              kafka_provisioning_commands=(
+              {{- range $topic := .Values.provisioning.topics }}
+                "/opt/bitnami/kafka/bin/kafka-topics.sh \
+                    --create \
+                    --if-not-exists \
+                    --bootstrap-server ${KAFKA_SERVICE} \
+                    --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \
+                    --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \
+                    {{- range $name, $value := $topic.config }}
+                    --config {{ $name }}={{ $value }} \
+                    {{- end }}
+                    --command-config ${CLIENT_CONF} \
+                    --topic {{ $topic.name }}"
+              {{- end }}
+              {{- range $command := .Values.provisioning.extraProvisioningCommands }}
+                {{- $command | quote | nindent 16 }}
+              {{- end }}
+              )
+
+              echo "Starting provisioning"
+              for ((index=0; index < ${#kafka_provisioning_commands[@]}; index+={{ .Values.provisioning.parallel }}))
+              do
+                for j in $(seq ${index} $((${index}+{{ .Values.provisioning.parallel }}-1)))
+                do
+                    ${kafka_provisioning_commands[j]} & # run each provisioning command asynchronously
+                done
+                wait  # wait for this batch of jobs to finish
+              done
+
+              echo "Running post-provisioning script if any given"
+              {{ .Values.provisioning.postScript | nindent 14 }}
+
+              echo "Provisioning succeeded"
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            {{- if (include "kafka.client.tlsEncryption" .) }}
+            - name: KAFKA_CLIENT_KEY_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "kafka.client.passwordsSecretName" . }}
+                  key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }}
+            - name: KAFKA_CLIENT_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "kafka.client.passwordsSecretName" . }}
+                  key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }}
+            - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "kafka.client.passwordsSecretName" . }}
+                  key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }}
+            {{- end }}
+            - name: KAFKA_SERVICE
+              value: {{ printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }}
+            {{- if (include "kafka.client.saslAuthentication" .) }}
+            {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }}
+            - name: SASL_USERNAME
+              value: {{ index $clientUsers 0 | quote }}
+            - name: SASL_USER_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "kafka.jaasSecretName" . }}
+                  key: system-user-password
+            {{- end }}
+            {{- if .Values.provisioning.extraEnvVars }}
+            {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.provisioning.extraEnvVarsCM .Values.provisioning.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.provisioning.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsCM "context" $) }}
+            {{- end }}
+            {{- if .Values.provisioning.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsSecret "context" $) }}
+            {{- end }}
+          {{- end }}
+          {{- if .Values.provisioning.resources }}
+          resources: {{- toYaml .Values.provisioning.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if or .Values.log4j .Values.existingLog4jConfigMap }}
+            - name: log4j-config
+              mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties
+              subPath: log4j.properties
+            {{- end }}
+            {{- if (include "kafka.client.tlsEncryption" .) }}
+            {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }}
+            - name: kafka-client-certs
+              mountPath: /certs
+              readOnly: true
+            {{- end }}
+            {{- end }}
+            {{- if .Values.provisioning.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.provisioning.sidecars }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if or .Values.log4j .Values.existingLog4jConfigMap }}
+        - name: log4j-config
+          configMap:
+            name: {{ include "kafka.log4j.configMapName" . }}
+        {{- end }}
+        {{- if (include "kafka.client.tlsEncryption" .) }}
+        {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }}
+        - name: kafka-client-certs
+          secret:
+            secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }}
+            defaultMode: 256
+        {{- end }}
+        {{- end }}
+        {{- if .Values.provisioning.extraVolumes }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }}
+        {{- end }}
+{{- end }}
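
Illustrative only, not part of the diff: a minimal values.yaml sketch of what might drive the provisioning job above. The keys mirror what the template reads (topics, numPartitions, replicationFactor, parallel, extraProvisioningCommands, postScript); the enable flag is assumed from the chart's values layout, and the topic name and config entries are hypothetical examples.

    provisioning:
      enabled: true             # assumed chart flag guarding this Job
      numPartitions: 3          # default for --partitions when a topic omits it
      replicationFactor: 2      # default for --replication-factor
      parallel: 1               # commands launched per batch before the wait
      topics:
        - name: orders          # hypothetical topic; rendered into --topic orders
          partitions: 6
          config:
            retention.ms: "86400000"   # rendered as --config retention.ms=86400000
      extraProvisioningCommands: []
      postScript: ""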

+ 17 - 0
kafka/helm/kafka/templates/log4j-configmap.yaml

@@ -0,0 +1,17 @@
+{{- if (include "kafka.log4j.createConfigMap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kafka.log4j.configMapName" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  log4j.properties: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.log4j "context" $ ) | nindent 4 }}
+{{- end -}}
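
Illustrative only: setting .Values.log4j in values.yaml is what populates the ConfigMap data above. A minimal log4j.properties sketch; the appender and level shown are just examples.

    log4j: |-
      # Route all Kafka logs to stdout at INFO level
      log4j.rootLogger=INFO, stdout
      log4j.appender.stdout=org.apache.log4j.ConsoleAppender
      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
      log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n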

+ 22 - 0
kafka/helm/kafka/templates/networkpolicy-egress.yaml

@@ -0,0 +1,22 @@
+{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
+kind: NetworkPolicy
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+metadata:
+  name: {{ printf "%s-egress" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels:
+    {{- include "common.labels.matchLabels" . | nindent 6 }}
+  policyTypes:
+    - Egress
+  egress:
+    {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }}
+{{- end }}
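
Illustrative only: networkPolicy.egressRules.customRules is rendered verbatim into spec.egress, so entries use plain NetworkPolicy syntax. A hypothetical rule allowing DNS egress:

    networkPolicy:
      enabled: true
      egressRules:
        customRules:
          - to:
              - namespaceSelector: {}   # any namespace; hypothetical example
            ports:
              - port: 53
                protocol: UDP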

+ 53 - 0
kafka/helm/kafka/templates/networkpolicy-ingress.yaml

@@ -0,0 +1,53 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+metadata:
+  name: {{ printf "%s-ingress" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels:
+    {{- include "common.labels.matchLabels" . | nindent 6 }}
+  policyTypes:
+    - Ingress
+  ingress:
+    # Allow client connections
+    - ports:
+        - port: {{ .Values.containerPorts.client }}
+      {{- if not .Values.networkPolicy.allowExternal }}
+      from:
+        - podSelector:
+            matchLabels:
+              {{ template "common.names.fullname" . }}-client: "true"
+          {{- if .Values.networkPolicy.explicitNamespacesSelector }}
+          namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }}
+          {{- end }}
+      {{- end }}
+    # Allow inter-broker communication
+    - ports:
+        - port: {{ .Values.containerPorts.internal }}
+      from:
+        - podSelector:
+            matchLabels:
+            {{- include "common.labels.matchLabels" . | nindent 14 }}
+    # Allow external connections
+    {{- if .Values.externalAccess.enabled }}
+    - ports:
+        - port: {{ .Values.containerPorts.external }}
+    {{- if .Values.networkPolicy.externalAccess.from }}
+      from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }}
+    {{- end }}
+    {{- end }}
+    {{- if .Values.metrics.kafka.enabled }}
+    # Allow Prometheus scrapes
+    - ports:
+        - port: {{ .Values.metrics.kafka.containerPorts.metrics }}
+    {{- end }}
+{{- end }}
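
Illustrative only: with networkPolicy.allowExternal set to false, the first ingress rule above only admits pods carrying the "<fullname>-client: true" label. Assuming a release fullname of "kafka" (hypothetical), a client pod would be labeled like this:

    apiVersion: v1
    kind: Pod
    metadata:
      name: kafka-consumer          # hypothetical pod name
      labels:
        kafka-client: "true"        # key derived from common.names.fullname
    spec:
      containers:
        - name: consumer
          image: bitnami/kafka      # hypothetical client image
          command: ["sleep", "infinity"]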

+ 26 - 0
kafka/helm/kafka/templates/poddisruptionbudget.yaml

@@ -0,0 +1,26 @@
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and .Values.pdb.create (gt $replicaCount 1) }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: kafka
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.pdb.minAvailable }}
+  minAvailable: {{ .Values.pdb.minAvailable }}
+  {{- end }}
+  {{- if .Values.pdb.maxUnavailable }}
+  maxUnavailable: {{ .Values.pdb.maxUnavailable }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: kafka
+{{- end }}
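
Illustrative only: the PodDisruptionBudget above is emitted only when pdb.create is true and replicaCount is greater than 1; minAvailable and maxUnavailable map straight through to the spec. A minimal sketch:

    replicaCount: 3
    pdb:
      create: true
      maxUnavailable: 1   # allow at most one broker down during voluntary disruptions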

Some files were not shown because too many files changed in this diff