yawyd 1 year ago
parent
commit
d2cf03e329
100 changed files with 3130 additions and 96 deletions
  1. 20 20
      clickhouse/templates/service.yaml
  2. 10 8
      clickhouse/values.yaml
  3. 21 0
      createcert/ca.crt
  4. 27 0
      createcert/ca.key
  5. 1 0
      createcert/ca.srl
  6. 3 0
      createcert/cecf-tls-secret.yaml
  7. 21 0
      createcert/cecf.base.crt
  8. 27 0
      createcert/cecf.base.key
  9. 32 0
      createcert/create-ca.sh
  10. 60 0
      createcert/create-certificate.sh
  11. 3 0
      createcert/import_to_k8s.sh
  12. 8 0
      createcert/readme.md
  13. 6 11
      deepflow/deepflow-otel-spring-demo.yaml
  14. 0 0
      docker-registry/.helmignore
  15. 13 0
      docker-registry/Chart.yaml
  16. 202 0
      docker-registry/LICENSE
  17. 126 0
      docker-registry/README.md
  18. 19 0
      docker-registry/templates/NOTES.txt
  19. 209 0
      docker-registry/templates/_helpers.tpl
  20. 13 0
      docker-registry/templates/configmap.yaml
  21. 67 0
      docker-registry/templates/cronjob.yaml
  22. 95 0
      docker-registry/templates/deployment.yaml
  23. 42 0
      docker-registry/templates/hpa.yaml
  24. 22 0
      docker-registry/templates/hpaV1.yaml
  25. 50 0
      docker-registry/templates/ingress.yaml
  26. 18 0
      docker-registry/templates/poddisruptionbudget.yaml
  27. 17 0
      docker-registry/templates/prometheusrules.yaml
  28. 27 0
      docker-registry/templates/pvc.yaml
  29. 40 0
      docker-registry/templates/secret.yaml
  30. 49 0
      docker-registry/templates/service.yaml
  31. 20 0
      docker-registry/templates/serviceaccount.yaml
  32. 21 0
      docker-registry/templates/servicemonitor.yaml
  33. 231 0
      docker-registry/values.yaml
  34. 0 48
      grafana/templates/quota.yml
  35. 4 4
      grafana/values.yaml
  36. 14 5
      ingress-nginx/ingress-nginx/values.yaml
  37. 23 0
      kafka/.helmignore
  38. 25 0
      kafka/Chart.yaml
  39. 226 0
      kafka/README.md
  40. 23 0
      kafka/charts/zookeeper/.helmignore
  41. 20 0
      kafka/charts/zookeeper/Chart.yaml
  42. 198 0
      kafka/charts/zookeeper/README.md
  43. BIN
      kafka/charts/zookeeper/img/zookeeper.png
  44. 12 0
      kafka/charts/zookeeper/templates/NOTES.txt
  45. 76 0
      kafka/charts/zookeeper/templates/_helpers.tpl
  46. 29 0
      kafka/charts/zookeeper/templates/headless-service.yaml
  47. 11 0
      kafka/charts/zookeeper/templates/poddisruptionbudget.yaml
  48. 4 0
      kafka/charts/zookeeper/templates/service-account.yaml
  49. 186 0
      kafka/charts/zookeeper/templates/statefulset.yaml
  50. 34 0
      kafka/charts/zookeeper/templates/tests/test-connection.yaml
  51. 100 0
      kafka/charts/zookeeper/values.yaml
  52. BIN
      kafka/img/kafka.png
  53. 8 0
      kafka/templates/NOTES.txt
  54. 92 0
      kafka/templates/_helpers.tpl
  55. 21 0
      kafka/templates/headless-service.yaml
  56. 30 0
      kafka/templates/nodeport-service.yaml
  57. 11 0
      kafka/templates/poddisruptionbudget.yaml
  58. 4 0
      kafka/templates/service-account.yaml
  59. 221 0
      kafka/templates/statefulset.yaml
  60. 91 0
      kafka/templates/tests/test-connection.yaml
  61. 126 0
      kafka/values.yaml
  62. 0 0
      kafka_bitnami/README.MD
  63. 0 0
      kafka_bitnami/docker/Dockerfile
  64. 0 0
      kafka_bitnami/docker/entrypoint.sh
  65. 0 0
      kafka_bitnami/helm/kafka/.helmignore
  66. 0 0
      kafka_bitnami/helm/kafka/Chart.lock
  67. 0 0
      kafka_bitnami/helm/kafka/Chart.yaml
  68. 0 0
      kafka_bitnami/helm/kafka/README.md
  69. 0 0
      kafka_bitnami/helm/kafka/charts/common/.helmignore
  70. 0 0
      kafka_bitnami/helm/kafka/charts/common/Chart.yaml
  71. 0 0
      kafka_bitnami/helm/kafka/charts/common/README.md
  72. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_affinities.tpl
  73. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_capabilities.tpl
  74. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_errors.tpl
  75. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_images.tpl
  76. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_ingress.tpl
  77. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_labels.tpl
  78. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_names.tpl
  79. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_secrets.tpl
  80. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_storage.tpl
  81. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_tplvalues.tpl
  82. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_utils.tpl
  83. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/_warnings.tpl
  84. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/validations/_cassandra.tpl
  85. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/validations/_mariadb.tpl
  86. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/validations/_mongodb.tpl
  87. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/validations/_mysql.tpl
  88. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/validations/_postgresql.tpl
  89. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/validations/_redis.tpl
  90. 0 0
      kafka_bitnami/helm/kafka/charts/common/templates/validations/_validations.tpl
  91. 0 0
      kafka_bitnami/helm/kafka/charts/common/values.yaml
  92. 21 0
      kafka_bitnami/helm/kafka/charts/zookeeper/.helmignore
  93. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/Chart.lock
  94. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/Chart.yaml
  95. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/README.md
  96. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/.helmignore
  97. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/Chart.yaml
  98. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/README.md
  99. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl
  100. 0 0
      kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl

+ 20 - 20
clickhouse/templates/service.yaml

@@ -109,15 +109,15 @@ spec:
     #  {{- end }}
     #{{- end }}
     #{{- end }}
-    #- name: tcp-mysql
-    #  targetPort: tcp-mysql
-    #  port: {{ .Values.service.ports.mysql }}
-    #  protocol: TCP
-    #  {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.mysql)) }}
-    #  nodePort: {{ .Values.service.nodePorts.mysql }}
-    #  {{- else if eq .Values.service.type "ClusterIP" }}
-    #  nodePort: null
-    #  {{- end }}
+    - name: tcp-mysql
+      targetPort: tcp-mysql
+      port: {{ .Values.service.ports.mysql }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.mysql)) }}
+      nodePort: {{ .Values.service.nodePorts.mysql }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
     #- name: tcp-postgresql
     #  targetPort: tcp-postgresql
     #  port: {{ .Values.service.ports.postgresql }}
@@ -136,17 +136,17 @@ spec:
     #  {{- else if eq .Values.service.type "ClusterIP" }}
     #  nodePort: null
     #  {{- end }}
-    #{{- if .Values.metrics.enabled }}
-    #- name: http-metrics
-    #  targetPort: http-metrics
-    #  port: {{ .Values.service.ports.metrics }}
-    #  protocol: TCP
-    #  {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }}
-    #  nodePort: {{ .Values.service.nodePorts.metrics }}
-    #  {{- else if eq .Values.service.type "ClusterIP" }}
-    #  nodePort: null
-    #  {{- end }}
-    #{{- end }}
+    {{- if .Values.metrics.enabled }}
+    - name: http-metrics
+      targetPort: http-metrics
+      port: {{ .Values.service.ports.metrics }}
+      protocol: TCP
+      {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }}
+      nodePort: {{ .Values.service.nodePorts.metrics }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+    {{- end }}
     #{{- if .Values.service.extraPorts }}
     #{{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
     #{{- end }}

+ 10 - 8
clickhouse/values.yaml

@@ -186,11 +186,11 @@ customStartupProbe: {}
 ##
 resources:
   limits: 
-    cpu: 3000m
-    memory: 6Gi
-  requests:
     cpu: 2000m
     memory: 4Gi
+  requests:
+    cpu: 1000m
+    memory: 2Gi
 ## Configure Pods Security Context
 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
 ## @param podSecurityContext.enabled Enabled ClickHouse pods' Security Context
@@ -590,8 +590,8 @@ affinity: {}
 ## ref: https://kubernetes.io/docs/user-guide/node-selection/
 ##
 
-nodeSelector: 
-  kubernetes.io/hostname: cest-3
+#nodeSelector: 
+#  kubernetes.io/hostname: cest-3
    
 #n @param to.io/hostname: cest-3erations Tolerations for ClickHouse pods assignment
 ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
@@ -718,6 +718,7 @@ service:
   ##
   ports:
     http: 8123
+    mysql: 9004
     metrics: 8001
   ## Node ports to expose
   ## @param service.nodePorts.http Node port for HTTP
@@ -736,6 +737,7 @@ service:
   nodePorts:
     http: ""
     metrics: 30001
+    mysql: "30004"
   ## @param service.clusterIP ClickHouse service Cluster IP
   ## e.g.:
   ## clusterIP: None
@@ -893,7 +895,7 @@ ingress:
   apiVersion: ""
   ## @param ingress.hostname Default host for the ingress record
   ##
-  hostname: clickhouse.cecf.base
+  hostname: clickhouse.cestong.com.cn
   ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+)
   ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
   ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
@@ -1158,7 +1160,7 @@ externalZookeeper:
   ## @param externalZookeeper.servers List of external zookeeper servers to use
   ## @param externalZookeeper.port Port of the Zookeeper servers
   ##
-  servers: []
+  servers: ["kafka-zookeeper-headless.observe.svc.cluster.local"]
   port: 2888
 
 ## @section Zookeeper subchart parameters
@@ -1168,7 +1170,7 @@ externalZookeeper:
 ## @param zookeeper.service.ports.client Zookeeper client port
 ##
 zookeeper:
-  enabled: true
+  enabled: false
   replicaCount: 1
   service:
     ports:

+ 21 - 0
createcert/ca.crt

@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIJAIhAnd/h2u3VMA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV
+BAYTAlVTMRgwFgYDVQQKDA9fRGV2ZWxvcG1lbnQgQ0ExITAfBgNVBAMMGERldmVs
+b3BtZW50IGNlcnRpZmljYXRlczAeFw0yMzA4MjQwNjA1MjNaFw0zMzA4MjEwNjA1
+MjNaMEoxCzAJBgNVBAYTAlVTMRgwFgYDVQQKDA9fRGV2ZWxvcG1lbnQgQ0ExITAf
+BgNVBAMMGERldmVsb3BtZW50IGNlcnRpZmljYXRlczCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAL9D2d1BWeGVfzM540uOfoJ3mnwrfg2mFDQzTOHCWGqU
+MeCCvgfvu4nF47TL1BGtUILXpWIch1AN6JBgmLqdIBEN/J2JxPnP5AWVIv5XwAVR
+ypcfILoLN83DwQoZAFFH39pK+73JkkLbXkw35JFyzYZcKi25NnO6AdPKcW8T5RJq
+jtFaYXW6jeM3JETltl8ueZUjOKt49T51iZ3bGf9AwWXGlVx47/iauF94LB53P/BK
+XMQpobVDcXjrhKSG2UoGTiB9yDFHVpxVlYCvKXXmz3NzyizGdag/SyHnaT9A3ekA
+jNQG8tc6iOlFwgVgcvZCHVAJgeA03ZFGryOE/ezR55sCAwEAAaNQME4wHQYDVR0O
+BBYEFNlYFVMl0hhLbQLYb1yhvOJLNFkAMB8GA1UdIwQYMBaAFNlYFVMl0hhLbQLY
+b1yhvOJLNFkAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAChUUbcV
+GdAIGnn7biP/n05reo1HdwKVqC9j0qMY8+04/rG+XdzQWYgU8wlcY8k13Ty+pkfy
+9CUFy7FKzdXYBt9aBUKdGzpBGa0KjoV0D78JjbQSNW8V7KW7fvoAgyjXyUrgD1OL
+Hh2rVccYJxaBCYQIlX/yrRNmBWzc1MWoSYPHYbeCUokrdn+rYdoDlzVDEvFLTw3Q
+gma7KCnlmE5Pog3LIUySHmDujw4MGmoE0XQhoPozZuL6ZRaSoA7+d/OVrOY3YqUx
+11JXKJjWWMxQhAX5znFC9+SCNCxc7CrN9RYqvbLAE3aZ7OlSs37fcWNhx0gMqqsF
+BoEC33sONaWuxv8=
+-----END CERTIFICATE-----

+ 27 - 0
createcert/ca.key

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEAv0PZ3UFZ4ZV/MznjS45+gneafCt+DaYUNDNM4cJYapQx4IK+
+B++7icXjtMvUEa1QgtelYhyHUA3okGCYup0gEQ38nYnE+c/kBZUi/lfABVHKlx8g
+ugs3zcPBChkAUUff2kr7vcmSQtteTDfkkXLNhlwqLbk2c7oB08pxbxPlEmqO0Vph
+dbqN4zckROW2Xy55lSM4q3j1PnWJndsZ/0DBZcaVXHjv+Jq4X3gsHnc/8EpcxCmh
+tUNxeOuEpIbZSgZOIH3IMUdWnFWVgK8pdebPc3PKLMZ1qD9LIedpP0Dd6QCM1Aby
+1zqI6UXCBWBy9kIdUAmB4DTdkUavI4T97NHnmwIDAQABAoIBAQCwPPEvMtyDRKGk
+rh6GrJJd3Z938wt/wBy1zN921wFpk4wx98bqwTfpR3L+USLj9vNM3TADK45zcZ1u
+C9QvgEDf9idjcmMSXUEa88T+4BSL0kX+4ivhwhx1eX8Sst7cBoJ6PsWgp3AiMsi9
+KnNuon5G4/3pi9Rk5mxwnwXO+GIHiJiqDaxTO0xej9FU95IzS22WNLsu7Zuz0TXv
+XBW8vAZkZqju74S2DM66UCEaVtg8XrrAW3ZlLjJJMtGygyHiMeKd3qpfmql0A1fr
+5L5ZXNwVUNGh2APpMF7rNN1OJLRoCUWabHIFHY+JprXECMX0iOX1z+25Co6HKJyw
+aw2XFOphAoGBAN86svdoW4beKzAWQsSg7HDCpurC7CP0FxIhkdQVWadVLcLzhEbe
+UrcmBMUkTcQ4Tw0VF0pPt5n6hMU9C9y/XOn/DAE0xalgXp5TI5hbDkhz5+FZrJa7
+8JkV8ixaPFtNEzHta1tkYn8sf1z+9oaXBFSS+3ASYJI9l2U3BV327e51AoGBANtX
+4tr/wZ3/PyFrjyi0p0Oa8QfPPzGzuBO972nKjwfilo+cC5aC2UWvAGtTHmZBImah
+bcdlHqy/aOw/95Fm0Mdhja///sxJw6u9SXV5FEtvnJDSyfHo6jDaYGdtTz7yja06
+d+3a3Cfj61AA8V4UiJrXYZW1YEa1Eo5F7k/LsNvPAoGBANc7TJVZjG40RiXeQNA/
+bNmoBLJz8KUV0yzLK0T2tPOvIClJDHAcKlqmkJDYWRbpR+cQP92JkTgooN1f59EW
+vWpsXFHJZQ/TeEdlV/utR8852gigotTPLsMbst1qv6k0Xl6UUR/UYVfHOqIavu20
+LBuzgDVlsrEuK4wOieE4ld6FAoGBAIIfZj5jYZp4lRwfcgNFAMGThpAKe8PhiT90
++zX4a9fSrV0uxpUYtQqo4gfJfIxO8biO5dBXhdYXi4gz/otswki179dMd/z7ebBF
+j/G3zble0jP8SL9RXLV0p6JT76I5RKFSwVkC8H5dsPh6RrZ4Wf357jNhMWdg/kwW
+4mTGQvpzAoGBAJqY3bZF3x/Gdk6a9FLwefS8Ozv8Png4GRcL3MxZiWbsauDDPRAk
+ZQxEG8EnIAr0DeVO4tpHGz8aR3yvWXc0XbvSmcQ9U51zHLDpICCVZS0TyoZXr5ps
+s6p6zpzdYVfsigD1xIwRvePIeQLT4PeJmAjINPaa8Z6EC1x3jgj3SWI6
+-----END RSA PRIVATE KEY-----

+ 1 - 0
createcert/ca.srl

@@ -0,0 +1 @@
+DD9B574E74C970CE

File diff suppressed because it is too large
+ 3 - 0
createcert/cecf-tls-secret.yaml


+ 21 - 0
createcert/cecf.base.crt

@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDhzCCAm+gAwIBAgIJAN2bV050yXDOMA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV
+BAYTAlVTMRgwFgYDVQQKDA9fRGV2ZWxvcG1lbnQgQ0ExITAfBgNVBAMMGERldmVs
+b3BtZW50IGNlcnRpZmljYXRlczAeFw0yMzA4MjQwNjA1MzdaFw0zMzA4MjEwNjA1
+MzdaMD0xCzAJBgNVBAYTAlVTMRowGAYDVQQKDBFMb2NhbCBEZXZlbG9wbWVudDES
+MBAGA1UEAwwJY2VjZi5iYXNlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAnZpGOMlYeuEtd3RqZJF3KW1DbTO8l6cMD6AH6AvjV82JBIh1oQwBrv/GYRGA
+bkD6GA+Bw0RBh+NwOgtmJ/VzJh+FTGIYv9xo5Yj4xAfpvLPhbpst+nerjZuliNhh
+LlyI6Mutwt8vEHODZX9osbmORaPDeOkT1nXKy0wrIoDN7Lv2EXj6BTyKLOP3lpnb
+ATx773M3VdRg1UOlMYoEA4gEHtVYmzDUQHJ+aOQE5omhJ0d5KbMyeZ39OlFDiMQT
+xRDXU8HX40Y9SfshmQS0JOW1qrLhqUCyjhebF1cnajQRtYSCC8pCNmKbMk44QIoI
+d96tGsPd8IOyTpIAK3xTw7YN0wIDAQABo30wezAfBgNVHSMEGDAWgBTZWBVTJdIY
+S20C2G9cobziSzRZADAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE8DAdBgNVHSUEFjAU
+BggrBgEFBQcDAQYIKwYBBQUHAwIwIQYDVR0RBBowGIIJY2VjZi5iYXNlggsqLmNl
+Y2YuYmFzZTANBgkqhkiG9w0BAQsFAAOCAQEAoJ5hzab/XEeF+bd78p9YmB9TamsB
+aOAudNQ8/XPsyvAgiDB8ncU6e9SE7hdNb0Cgt5DZMLZ7D1I2xmpaGSilQUGO5zv+
+tn3QRVnPoNRMoNfwOaXjoJkxta9rPAMXnVUFTs3C/F86baZcFN4FD0ZhB7geRePF
+Ss0TlC668+LJdQanJfRS8tutvjdazQY+FeRrCprvQ9aOE3qvZ5f8OFJqIUG1rtg9
+a4uh8c4KatObqge+v0DDbrhMNJpUQRofvI5DJ5Zp3yIIGOTc7XKN5TUuBTqzV01O
+jEmuTwGawXXimeIz9Gu8pOiGxNpQAMz2SttU8w/zAIbkSMgtlM/nZsP/Og==
+-----END CERTIFICATE-----

+ 27 - 0
createcert/cecf.base.key

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAnZpGOMlYeuEtd3RqZJF3KW1DbTO8l6cMD6AH6AvjV82JBIh1
+oQwBrv/GYRGAbkD6GA+Bw0RBh+NwOgtmJ/VzJh+FTGIYv9xo5Yj4xAfpvLPhbpst
++nerjZuliNhhLlyI6Mutwt8vEHODZX9osbmORaPDeOkT1nXKy0wrIoDN7Lv2EXj6
+BTyKLOP3lpnbATx773M3VdRg1UOlMYoEA4gEHtVYmzDUQHJ+aOQE5omhJ0d5KbMy
+eZ39OlFDiMQTxRDXU8HX40Y9SfshmQS0JOW1qrLhqUCyjhebF1cnajQRtYSCC8pC
+NmKbMk44QIoId96tGsPd8IOyTpIAK3xTw7YN0wIDAQABAoH/TFZtrRPZx6tKWpII
+9q1/CpHiFwL4VnyTWv4o1jzr4ZuswoVWdKtcJWe+r0XK75yJhx37vJlTJH8F3Reo
+Y7KAE/cwmc7qLgSUypmN5WXv0XdPukNwAeH1NqItjBtC9zHbHueJQtrX8SAChScx
+juRSBWlNRUOgWEC1+cLrIctB82hVDbCwDXKB7zvchspGp+XNfutr7QVfeyD7ie3P
+RLNoZLIvzYPVmHmlDSKjlpOyeHH/jNlKGwWDnE/K7dkAnXEJ+qj3WZFHBNcREa+l
+kJ6DotAEsQSE+jhqq1Bs3HMJtZTsfy6r5vXqESqoSowTA10L4X/AygnQVA53yiY7
+h8pBAoGBANCrl40j5sE6uK4+fc3zbPgtRFTdACPGKs8RgV5fzad7CrbwcvbdnEUV
+h/C0n33m7UDSOoNFDRKPUGHYhXgeXuk+D0EDdx1CKJwKUGXyFZSC5ebWHVXkZpRG
+sUUycvFAMDc/0MSlcDVrbQhZM3tVQikqFhY40Kk3xcu1WXhiQCRZAoGBAMFZcnnM
+aUYX3UGUsysZ1tN2XooETW2SMKdT1FCWSXtpxU4OdEqTvwSgM22KPjGa5aO0sMHh
+V+U74RTN4+klx0oek7wD3ligWppj3B4yQ5Q91fOXMWDNU09NExupj8lryroeVgvX
+5mOnvqjD0aryOUuxngUO1vOv1vK27SI8QK4LAoGBALJWFB2x0yLNVszD56y+ICEy
+PrXwcpAgarGxmkjXw+EhDRUwBnDpEQcR0a5sD+sAE0BU62EJAJUeGmqlK9xuyFq6
+iTrdBU3iHubNAc0+yia0gFt6HnQr0Wdjy1kCNq3sV8jAJ2IIqg9dkj+STKdZREP+
+DgZL5Djay5a/lImIHp1xAoGBAIWZ7h01XwT9FnjrbEZ7C1OBNmA+CtaMrqvgpBOv
+VUU+2ARQvpCNuO9UV1T4snMKamWNRJojwN14W9ZJUpPW8/dGW6UOM0SgQRrvp7kB
+2GqFs+Im9grCti9UvcBekSmIH0QuFB0I6Xyw+3CYJrftuiOQrvUlxOgL+AYs2nzO
+j47zAoGBAJIqlgib9Yjn5usrWQo7wquyMHEcjjAKwDw3kh3LFNIYPjvZlVk9jP2y
+sWRpJXq7eheZYcYUA0pOmiBozCes4CNst5ctVuDXTPXsTO3vxDu5H2xQQIVOMrNd
++FK+JuY/TYEJxrXzX2oGFt+yhgMzPvqkIRMcFOiN1vziZzoL1UjY
+-----END RSA PRIVATE KEY-----

+ 32 - 0
createcert/create-ca.sh

@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+# Generates your own Certificate Authority for development.
+# This script should be executed just once.
+
+set -e
+
+if [ -f "ca.crt" ] || [ -f "ca.key" ]; then
+    echo -e "\e[41mCertificate Authority files already exist!\e[49m"
+    echo
+    echo -e "You only need a single CA even if you need to create multiple certificates."
+    echo -e "This way, you only ever have to import the certificate in your browser once."
+    echo
+    echo -e "If you want to restart from scratch, delete the \e[93mca.crt\e[39m and \e[93mca.key\e[39m files."
+    exit
+fi
+
+# Generate private key
+openssl genrsa -out ca.key 2048
+
+# Generate root certificate
+openssl req -x509 -new -nodes -subj "/C=US/O=_Development CA/CN=Development certificates" -key ca.key -sha256 -days 3650 -out ca.crt
+
+echo -e "\e[42mSuccess!\e[49m"
+echo
+echo "The following files have been written:"
+echo -e "  - \e[93mca.crt\e[39m is the public certificate that should be imported in your browser"
+echo -e "  - \e[93mca.key\e[39m is the private key that will be used by \e[93mcreate-certificate.sh\e[39m"
+echo
+echo "Next steps:"
+echo -e "  - Import \e[93mca.crt\e[39m in your browser"
+echo -e "  - run \e[93mcreate-certificate.sh example.com\e[39m"

+ 60 - 0
createcert/create-certificate.sh

@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+# Generates a wildcard certificate for a given domain name.
+
+set -e
+
+if [ -z "$1" ]; then
+    echo -e "\e[43mMissing domain name!\e[49m"
+    echo
+    echo "Usage: $0 example.com"
+    echo
+    echo "This will generate a wildcard certificate for the given domain name and its subdomains."
+    exit
+fi
+
+DOMAIN=$1
+
+if [ ! -f "ca.key" ]; then
+    echo -e "\e[41mCertificate Authority private key does not exist!\e[49m"
+    echo
+    echo -e "Please run \e[93mcreate-ca.sh\e[39m first."
+    exit
+fi
+
+# Generate a private key
+openssl genrsa -out "$DOMAIN.key" 2048
+
+# Create a certificate signing request
+openssl req -new -subj "/C=US/O=Local Development/CN=$DOMAIN" -key "$DOMAIN.key" -out "$DOMAIN.csr"
+
+# Create a config file for the extensions
+>"$DOMAIN.ext" cat <<-EOF
+authorityKeyIdentifier=keyid,issuer
+basicConstraints=CA:FALSE
+keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
+extendedKeyUsage = serverAuth, clientAuth
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = $DOMAIN
+DNS.2 = *.$DOMAIN
+EOF
+
+# Create the signed certificate
+openssl x509 -req \
+    -in "$DOMAIN.csr" \
+    -extfile "$DOMAIN.ext" \
+    -CA ca.crt \
+    -CAkey ca.key \
+    -CAcreateserial \
+    -out "$DOMAIN.crt" \
+    -days 3650 \
+    -sha256
+
+rm "$DOMAIN.csr"
+rm "$DOMAIN.ext"
+
+echo -e "\e[42mSuccess!\e[49m"
+echo
+echo -e "You can now use \e[93m$DOMAIN.key\e[39m and \e[93m$DOMAIN.crt\e[39m in your web server."
+echo -e "Don't forget that \e[1myou must have imported \e[93mca.crt\e[39m in your browser\e[0m to make it accept the certificate."

+ 3 - 0
createcert/import_to_k8s.sh

@@ -0,0 +1,3 @@
+kubectl -n observe create secret tls cecf-tls-secret \
+  --cert=cecf.base.crt \
+  --key=cecf.base.key

+ 8 - 0
createcert/readme.md

@@ -0,0 +1,8 @@
+- 先创建ca: o
+`bash create-ca.sh`
+
+- 后创建cert&key
+`bash create-certificate.sh domain.com`
+
+- 最后导入到k8s
+`bash  import_to_k8s.sh`

+ 6 - 11
deepflow/deepflow-otel-spring-demo.yaml

@@ -73,7 +73,6 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -85,7 +84,7 @@ spec:
             fieldRef:
               fieldPath: status.podIP
         - name: OTEL_RESOURCE_ATTRIBUTES
-          value: service.name=item-svc,pod.ip=$(SERVICE_HOST)
+          value: pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -161,7 +160,6 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -175,7 +173,7 @@ spec:
             fieldRef:
               fieldPath: status.podIP
         - name: OTEL_RESOURCE_ATTRIBUTES
-          value: service.name=order-svc,pod.ip=$(SERVICE_HOST)
+          value: pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -248,7 +246,6 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -260,7 +257,7 @@ spec:
             fieldRef:
               fieldPath: status.podIP
         - name: OTEL_RESOURCE_ATTRIBUTES
-          value: service.name=stock-svc,pod.ip=$(SERVICE_HOST)
+          value: pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -336,7 +333,6 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -350,7 +346,7 @@ spec:
             fieldRef:
               fieldPath: status.podIP
         - name: OTEL_RESOURCE_ATTRIBUTES
-          value: service.name=user-svc,pod.ip=$(SERVICE_HOST)
+          value: pod.ip=$(SERVICE_HOST)
         - name: SERVICE_PORT
           value: "20880"
         - name: SW_AGENT_NAME
@@ -426,7 +422,6 @@ spec:
         args:
           - /home/docker-entrypoint.sh
           - '-javaagent:/sidecar/agent/opentelemetry-javaagent.jar'
-          - '-Dotel.resource.attributes=${OTEL_RESOURCE_ATTRIBUTES}'
           - '-Dotel.traces.exporter=otlp'
           - '-Dotel.metrics.exporter=otlp'
           - '-jar'
@@ -440,7 +435,7 @@ spec:
             fieldRef:
               fieldPath: status.podIP
         - name: OTEL_RESOURCE_ATTRIBUTES
-          value: service.name=shop-web,pod.ip=$(SERVICE_HOST)
+          value: pod.ip=$(SERVICE_HOST)
         - name: SW_AGENT_NAME
           value: spring-svc-webshop
         - name: OTEL_EXPORTER_OTLP_ENDPOINT
@@ -511,7 +506,7 @@ spec:
                 fieldRef:
                   fieldPath: status.podIP
             - name: OTEL_RESOURCE_ATTRIBUTES
-              value: service.name=loadgenerator,pod.ip=$(MY_POD_IP)
+              value: pod.ip=$(MY_POD_IP)
             - name: USERS
               value: '1'
 ##################################################################################################

+ 0 - 0
kafka/helm/kafka/.helmignore → docker-registry/.helmignore


+ 13 - 0
docker-registry/Chart.yaml

@@ -0,0 +1,13 @@
+apiVersion: v1
+appVersion: 2.8.1
+description: A Helm chart for Docker Registry
+home: https://hub.docker.com/_/registry/
+icon: https://helm.twun.io/docker-registry.png
+maintainers:
+- email: devin@canterberry.cc
+  name: Devin Canterberry
+  url: https://canterberry.cc/
+name: docker-registry
+sources:
+- https://github.com/docker/distribution-library-image
+version: 2.2.2

+ 202 - 0
docker-registry/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright The Helm Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 126 - 0
docker-registry/README.md

@@ -0,0 +1,126 @@
+# Docker Registry Helm Chart
+
+This directory contains a Kubernetes chart to deploy a private Docker Registry.
+
+## Prerequisites Details
+
+* PV support on underlying infrastructure (if persistence is required)
+
+## Chart Details
+
+This chart will do the following:
+
+* Implement a Docker registry deployment
+
+## Installing the Chart
+
+First, add the repo:
+
+```console
+$ helm repo add twuni https://helm.twun.io
+```
+
+To install the chart, use the following:
+
+```console
+$ helm install docker-registry twuni/docker-registry
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the docker-registry chart and
+their default values.
+
+| Parameter                   | Description                                                                                | Default         |
+|:----------------------------|:-------------------------------------------------------------------------------------------|:----------------|
+| `image.pullPolicy`          | Container pull policy                                                                      | `IfNotPresent`  |
+| `image.repository`          | Container image to use                                                                     | `registry`      |
+| `image.tag`                 | Container image tag to deploy                                                              | `2.7.1`         |
+| `imagePullSecrets`          | Specify image pull secrets                                                                 | `nil` (does not add image pull secrets to deployed pods) |
+| `persistence.accessMode`    | Access mode to use for PVC                                                                 | `ReadWriteOnce` |
+| `persistence.enabled`       | Whether to use a PVC for the Docker storage                                                | `false`         |
+| `persistence.deleteEnabled` | Enable the deletion of image blobs and manifests by digest                                 | `nil`           |
+| `persistence.size`          | Amount of space to claim for PVC                                                           | `10Gi`          |
+| `persistence.storageClass`  | Storage Class to use for PVC                                                               | `-`             |
+| `persistence.existingClaim` | Name of an existing PVC to use for config                                                  | `nil`           |
+| `serviceAccount.create`     | Create ServiceAccount                                                                      | `false`         |
+| `serviceAccount.name`       | ServiceAccount name                                                                        | `nil`           |
+| `serviceAccount.annotations` | Annotations to add to the ServiceAccount                                                  | `{}`            |
+| `service.port`              | TCP port on which the service is exposed                                                   | `5000`          |
+| `service.type`              | service type                                                                               | `ClusterIP`     |
+| `service.clusterIP`         | if `service.type` is `ClusterIP` and this is non-empty, sets the cluster IP of the service | `nil`           |
+| `service.nodePort`          | if `service.type` is `NodePort` and this is non-empty, sets the node port of the service   | `nil`           |
+| `service.loadBalancerIP`     | if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerIP of the service | `nil`          |
+| `service.loadBalancerSourceRanges`| if `service.type` is `LoadBalancer` and this is non-empty, sets the loadBalancerSourceRanges of the service | `nil`           |
+| `service.sessionAffinity`       | service session affinity                                                               | `nil`           |
+| `service.sessionAffinityConfig` | service session affinity config                                                        | `nil`           |
+| `replicaCount`              | k8s replicas                                                                               | `1`             |
+| `updateStrategy`            | update strategy for deployment                                                             | `{}`            |
+| `podAnnotations`            | Annotations for pod                                                                        | `{}`            |
+| `podLabels`                 | Labels for pod                                                                             | `{}`            |
+| `podDisruptionBudget`       | Pod disruption budget                                                                      | `{}`            |
+| `resources.limits.cpu`      | Container requested CPU                                                                    | `nil`           |
+| `resources.limits.memory`   | Container requested memory                                                                 | `nil`           |
+| `autoscaling.enabled`       | Enable autoscaling using HorizontalPodAutoscaler                                           | `false`         |
+| `autoscaling.minReplicas`   | Minimal number of replicas                                                                 | `1`             |
+| `autoscaling.maxReplicas`   | Maximal number of replicas                                                                 | `2`             |
+| `autoscaling.targetCPUUtilizationPercentage` | Target average utilization of CPU on Pods                                 | `60`            |
+| `autoscaling.targetMemoryUtilizationPercentage` | (Kubernetes ≥1.23) Target average utilization of Memory on Pods        | `60`            |
+| `autoscaling.behavior`      | (Kubernetes ≥1.23) Configurable scaling behavior                                           | `{}`            |
+| `priorityClassName`         | priorityClassName                                                                          | `""`            |
+| `storage`                   | Storage system to use                                                                      | `filesystem`    |
+| `tlsSecretName`             | Name of secret for TLS certs                                                               | `nil`           |
+| `secrets.htpasswd`          | Htpasswd authentication                                                                    | `nil`           |
+| `secrets.s3.accessKey`      | Access Key for S3 configuration                                                            | `nil`           |
+| `secrets.s3.secretKey`      | Secret Key for S3 configuration                                                            | `nil`           |
+| `secrets.s3.secretRef`      | The ref for an external secret containing the accessKey and secretKey keys                 | `""`            |
+| `secrets.swift.username`    | Username for Swift configuration                                                           | `nil`           |
+| `secrets.swift.password`    | Password for Swift configuration                                                           | `nil`           |
+| `secrets.haSharedSecret`    | Shared secret for Registry                                                                 | `nil`           |
+| `configData`                | Configuration hash for docker                                                              | `nil`           |
+| `s3.region`                 | S3 region                                                                                  | `nil`           |
+| `s3.regionEndpoint`         | S3 region endpoint                                                                         | `nil`           |
+| `s3.bucket`                 | S3 bucket name                                                                             | `nil`           |
+| `s3.rootdirectory`          | S3 prefix that is applied to allow you to segment data                                     | `nil`           |
+| `s3.encrypt`                | Store images in encrypted format                                                           | `nil`           |
+| `s3.secure`                 | Use HTTPS                                                                                  | `nil`           |
+| `swift.authurl`             | Swift authurl                                                                              | `nil`           |
+| `swift.container`           | Swift container                                                                            | `nil`           |
+| `proxy.enabled`             | If true, registry will function as a proxy/mirror                                          | `false`         |
+| `proxy.remoteurl`           | Remote registry URL to proxy requests to                                                   | `https://registry-1.docker.io`            |
+| `proxy.username`            | Remote registry login username                                                             | `nil`           |
+| `proxy.password`            | Remote registry login password                                                             | `nil`           |
+| `proxy.secretRef`           | The ref for an external secret containing the proxyUsername and proxyPassword keys         | `""`            |
+| `namespace`                 | specify a namespace to install the chart to - defaults to `.Release.Namespace`             | `{{ .Release.Namespace }}` |
+| `nodeSelector`              | node labels for pod assignment                                                             | `{}`            |
+| `affinity`                  | affinity settings                                                                          | `{}`            |
+| `tolerations`               | pod tolerations                                                                            | `[]`            |
+| `ingress.enabled`           | If true, Ingress will be created                                                           | `false`         |
+| `ingress.annotations`       | Ingress annotations                                                                        | `{}`            |
+| `ingress.labels`            | Ingress labels                                                                             | `{}`            |
+| `ingress.path`              | Ingress service path                                                                       | `/`             |
+| `ingress.hosts`             | Ingress hostnames                                                                          | `[]`            |
+| `ingress.tls`               | Ingress TLS configuration (YAML)                                                           | `[]`            |
+| `ingress.className`         | Ingress controller class name                                                              | `nginx`         |
+| `metrics.enabled`           | Enable metrics on Service                                                                  | `false`         |
+| `metrics.port`              | TCP port on which the service metrics is exposed                                           | `5001`          |
+| `metrics.serviceMonitor.annotations` | Prometheus Operator ServiceMonitor annotations                                    | `{}`            |
+| `metrics.serviceMonitor.enable` | If true, Prometheus Operator ServiceMonitor will be created                            | `false`         |
+| `metrics.serviceMonitor.labels` | Prometheus Operator ServiceMonitor labels                                              | `{}`            |
+| `metrics.prometheusRule.annotations` | Prometheus Operator PrometheusRule annotations                                    | `{}`            |
+| `metrics.prometheusRule.enable` | If true, Prometheus Operator prometheusRule will be created                            | `false`         |
+| `metrics.prometheusRule.labels` | Prometheus Operator prometheusRule labels                                              | `{}`            |
+| `metrics.prometheusRule.rules` | PrometheusRule defining alerting rules for a Prometheus instance                        | `{}`            |
+| `extraVolumeMounts`         | Additional volumeMounts to the registry container                                          | `[]`            |
+| `extraVolumes`              | Additional volumes to the pod                                                              | `[]`            |
+| `extraEnvVars`              | Additional environment variables to the pod                                                | `[]`            |
+| `initContainers`            | Init containers to be created in the pod                                                   | `[]`            |
+| `garbageCollect.enabled`    | If true, will deploy garbage-collector cronjob                                             | `false`         |
+| `garbageCollect.deleteUntagged` | If true, garbage-collector will delete manifests that are not currently referenced via tag | `true`      |
+| `garbageCollect.schedule`   | Crontab schedule; please use standard crontab format                                       | `0 1 * * *`     |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to
+`helm install`.
+
+To generate htpasswd file, run this docker command:
+`docker run --entrypoint htpasswd registry:2 -Bbn user password > ./htpasswd`.

+ 19 - 0
docker-registry/templates/NOTES.txt

@@ -0,0 +1,19 @@
+{{- /*
+NOTES.txt — printed by `helm install`/`helm status`; shows how to reach the
+registry for each service exposure mode (Ingress, NodePort, LoadBalancer,
+ClusterIP). Uses .Values.service.port: this chart exposes `service.port`
+(default 5000) and has no `service.externalPort` value, so the previous
+reference rendered an empty port for LoadBalancer services.
+*/ -}}
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "docker-registry.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of by running 'kubectl get svc -w {{ template "docker-registry.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "docker-registry.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "docker-registry.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl -n {{ .Release.Namespace }} port-forward $POD_NAME 8080:5000
+{{- end }}

+ 209 - 0
docker-registry/templates/_helpers.tpl

@@ -0,0 +1,209 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+Uses .Values.nameOverride when set, otherwise the chart name; truncated to
+63 chars (Kubernetes/DNS label limit) with any trailing "-" stripped.
+*/}}
+{{- define "docker-registry.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+Precedence: .Values.fullnameOverride wins outright; otherwise the result is
+"<release>-<name>", collapsed to just the release name when the release name
+already contains the chart name (avoids "registry-docker-registry"-style
+duplication).
+*/}}
+{{- define "docker-registry.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+docker-registry.envs renders the registry container's `env:` list, shared by
+the deployment and the garbage-collect cronjob. It emits, in order:
+  - REGISTRY_HTTP_SECRET from the chart-managed secret (haSharedSecret key);
+  - htpasswd auth settings when .Values.secrets.htpasswd is set;
+  - TLS cert/key paths under /etc/ssl/docker when .Values.tlsSecretName is set;
+  - storage-backend settings switched on .Values.storage
+    (filesystem / azure / s3 / swift) — s3 and swift credentials come from the
+    chart secret or, for s3/proxy, an external secretRef when provided;
+  - proxy/mirror credentials when .Values.proxy.enabled;
+  - REGISTRY_STORAGE_DELETE_ENABLED when .Values.persistence.deleteEnabled;
+  - any user-supplied .Values.extraEnvVars, verbatim.
+NOTE(review): the `{{- end -}}` whitespace-trim chaining is load-bearing for
+valid YAML output — edit with care and diff rendered output when changing.
+*/}}
+{{- define "docker-registry.envs" -}}
+- name: REGISTRY_HTTP_SECRET
+  valueFrom:
+    secretKeyRef:
+      name: {{ template "docker-registry.fullname" . }}-secret
+      key: haSharedSecret
+
+{{- if .Values.secrets.htpasswd }}
+- name: REGISTRY_AUTH
+  value: "htpasswd"
+- name: REGISTRY_AUTH_HTPASSWD_REALM
+  value: "Registry Realm"
+- name: REGISTRY_AUTH_HTPASSWD_PATH
+  value: "/auth/htpasswd"
+{{- end }}
+
+{{- if .Values.tlsSecretName }}
+- name: REGISTRY_HTTP_TLS_CERTIFICATE
+  value: /etc/ssl/docker/tls.crt
+- name: REGISTRY_HTTP_TLS_KEY
+  value: /etc/ssl/docker/tls.key
+{{- end -}}
+
+{{- if eq .Values.storage "filesystem" }}
+- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
+  value: "/var/lib/registry"
+{{- else if eq .Values.storage "azure" }}
+- name: REGISTRY_STORAGE_AZURE_ACCOUNTNAME
+  valueFrom:
+    secretKeyRef:
+      name: {{ template "docker-registry.fullname" . }}-secret
+      key: azureAccountName
+- name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY
+  valueFrom:
+    secretKeyRef:
+      name: {{ template "docker-registry.fullname" . }}-secret
+      key: azureAccountKey
+- name: REGISTRY_STORAGE_AZURE_CONTAINER
+  valueFrom:
+    secretKeyRef:
+      name: {{ template "docker-registry.fullname" . }}-secret
+      key: azureContainer
+{{- else if eq .Values.storage "s3" }}
+- name: REGISTRY_STORAGE_S3_REGION
+  value: {{ required ".Values.s3.region is required" .Values.s3.region }}
+- name: REGISTRY_STORAGE_S3_BUCKET
+  value: {{ required ".Values.s3.bucket is required" .Values.s3.bucket }}
+{{- if or (and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey) .Values.secrets.s3.secretRef }}
+- name: REGISTRY_STORAGE_S3_ACCESSKEY
+  valueFrom:
+    secretKeyRef:
+      name: {{ if .Values.secrets.s3.secretRef }}{{ .Values.secrets.s3.secretRef }}{{ else }}{{ template "docker-registry.fullname" . }}-secret{{ end }}
+      key: s3AccessKey
+- name: REGISTRY_STORAGE_S3_SECRETKEY
+  valueFrom:
+    secretKeyRef:
+      name: {{ if .Values.secrets.s3.secretRef }}{{ .Values.secrets.s3.secretRef }}{{ else }}{{ template "docker-registry.fullname" . }}-secret{{ end }}
+      key: s3SecretKey
+{{- end -}}
+
+{{- if .Values.s3.regionEndpoint }}
+- name: REGISTRY_STORAGE_S3_REGIONENDPOINT
+  value: {{ .Values.s3.regionEndpoint }}
+{{- end -}}
+
+{{- if .Values.s3.rootdirectory }}
+- name: REGISTRY_STORAGE_S3_ROOTDIRECTORY
+  value: {{ .Values.s3.rootdirectory | quote }}
+{{- end -}}
+
+{{- if .Values.s3.encrypt }}
+- name: REGISTRY_STORAGE_S3_ENCRYPT
+  value: {{ .Values.s3.encrypt | quote }}
+{{- end -}}
+
+{{- if .Values.s3.secure }}
+- name: REGISTRY_STORAGE_S3_SECURE
+  value: {{ .Values.s3.secure | quote }}
+{{- end -}}
+
+{{- else if eq .Values.storage "swift" }}
+- name: REGISTRY_STORAGE_SWIFT_AUTHURL
+  value: {{ required ".Values.swift.authurl is required" .Values.swift.authurl }}
+- name: REGISTRY_STORAGE_SWIFT_USERNAME
+  valueFrom:
+    secretKeyRef:
+      name: {{ template "docker-registry.fullname" . }}-secret
+      key: swiftUsername
+- name: REGISTRY_STORAGE_SWIFT_PASSWORD
+  valueFrom:
+    secretKeyRef:
+      name: {{ template "docker-registry.fullname" . }}-secret
+      key: swiftPassword
+- name: REGISTRY_STORAGE_SWIFT_CONTAINER
+  value: {{ required ".Values.swift.container is required" .Values.swift.container }}
+{{- end -}}
+
+{{- if .Values.proxy.enabled }}
+- name: REGISTRY_PROXY_REMOTEURL
+  value: {{ required ".Values.proxy.remoteurl is required" .Values.proxy.remoteurl }}
+- name: REGISTRY_PROXY_USERNAME
+  valueFrom:
+    secretKeyRef:
+      name: {{ if .Values.proxy.secretRef }}{{ .Values.proxy.secretRef }}{{ else }}{{ template "docker-registry.fullname" . }}-secret{{ end }}
+      key: proxyUsername
+- name: REGISTRY_PROXY_PASSWORD
+  valueFrom:
+    secretKeyRef:
+      name: {{ if .Values.proxy.secretRef }}{{ .Values.proxy.secretRef }}{{ else }}{{ template "docker-registry.fullname" . }}-secret{{ end }}
+      key: proxyPassword
+{{- end -}}
+
+{{- if .Values.persistence.deleteEnabled }}
+- name: REGISTRY_STORAGE_DELETE_ENABLED
+  value: "true"
+{{- end -}}
+
+{{- with .Values.extraEnvVars }}
+{{ toYaml . }}
+{{- end -}}
+
+{{- end -}}
+
+{{/*
+docker-registry.volumeMounts renders the registry container's volumeMounts,
+shared by the deployment and the garbage-collect cronjob:
+  - the rendered config at /etc/docker/registry (always);
+  - the htpasswd file at /auth (read-only) when secrets.htpasswd is set;
+  - image data at /var/lib/registry when storage is "filesystem";
+  - TLS material at /etc/ssl/docker (read-only) when tlsSecretName is set;
+  - any user-supplied extraVolumeMounts, verbatim.
+Mount names must match those produced by "docker-registry.volumes".
+*/}}
+{{- define "docker-registry.volumeMounts" -}}
+- name: "{{ template "docker-registry.fullname" . }}-config"
+  mountPath: "/etc/docker/registry"
+
+{{- if .Values.secrets.htpasswd }}
+- name: auth
+  mountPath: /auth
+  readOnly: true
+{{- end }}
+
+{{- if eq .Values.storage "filesystem" }}
+- name: data
+  mountPath: /var/lib/registry/
+{{- end }}
+
+{{- if .Values.tlsSecretName }}
+- mountPath: /etc/ssl/docker
+  name: tls-cert
+  readOnly: true
+{{- end }}
+
+{{- with .Values.extraVolumeMounts }}
+{{ toYaml . }}
+{{- end }}
+
+{{- end -}}
+
+{{/*
+docker-registry.volumes renders the pod's volumes, mirroring
+"docker-registry.volumeMounts":
+  - the chart ConfigMap as "<fullname>-config" (always);
+  - "auth": only the htpasswd key projected out of the chart secret;
+  - "data" (filesystem storage only): the chart-managed or existing PVC when
+    persistence is enabled, otherwise an ephemeral emptyDir;
+  - "tls-cert": the user-provided TLS secret when tlsSecretName is set;
+  - any user-supplied extraVolumes, verbatim.
+*/}}
+{{- define "docker-registry.volumes" -}}
+- name: {{ template "docker-registry.fullname" . }}-config
+  configMap:
+    name: {{ template "docker-registry.fullname" . }}-config
+
+{{- if .Values.secrets.htpasswd }}
+- name: auth
+  secret:
+    secretName: {{ template "docker-registry.fullname" . }}-secret
+    items:
+    - key: htpasswd
+      path: htpasswd
+{{- end }}
+
+{{- if eq .Values.storage "filesystem" }}
+- name: data
+  {{- if .Values.persistence.enabled }}
+  persistentVolumeClaim:
+    claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "docker-registry.fullname" . }}{{- end }}
+  {{- else }}
+  emptyDir: {}
+  {{- end -}}
+{{- end }}
+
+{{- if .Values.tlsSecretName }}
+- name: tls-cert
+  secret:
+    secretName: {{ .Values.tlsSecretName }}
+{{- end }}
+
+{{- with .Values.extraVolumes }}
+{{ toYaml . }}
+{{- end }}
+{{- end -}}

+ 13 - 0
docker-registry/templates/configmap.yaml

@@ -0,0 +1,13 @@
+{{/*
+ConfigMap holding the registry daemon configuration. .Values.configData is
+serialized verbatim into config.yml, which the deployment and cronjob mount
+at /etc/docker/registry/config.yml and pass to `/bin/registry`.
+*/}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "docker-registry.fullname" . }}-config
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+data:
+  config.yml: |-
+{{ toYaml .Values.configData | indent 4 }}

+ 67 - 0
docker-registry/templates/cronjob.yaml

@@ -0,0 +1,67 @@
+{{/*
+Optional CronJob (enabled via garbageCollect.enabled) that runs
+`registry garbage-collect` on the schedule in garbageCollect.schedule,
+reusing the same env/volumeMounts/volumes helpers as the main deployment so
+it sees the identical config and storage backend. The checksum annotations
+tie the job template to the rendered configmap/secret contents.
+*/}}
+{{- if .Values.garbageCollect.enabled }}
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: {{ template "docker-registry.fullname" . }}-garbage-collector
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  schedule: {{ .Values.garbageCollect.schedule | quote }}
+  jobTemplate:
+    metadata:
+      labels:
+        app: {{ template "docker-registry.name" . }}
+        release: {{ .Release.Name }}
+        {{- with .Values.podLabels }}
+        {{ toYaml . | nindent 8 }}
+        {{- end }}
+      annotations:
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+        {{- if .Values.podAnnotations }}
+        {{ toYaml .Values.podAnnotations | nindent 8 }}
+        {{- end }}
+    spec:
+      template:
+        spec:
+          {{- if or (eq .Values.serviceAccount.create true) (ne .Values.serviceAccount.name "") }}
+          serviceAccountName: {{ .Values.serviceAccount.name | default (include "docker-registry.fullname" .) }}
+          {{- end }}
+          {{- if .Values.imagePullSecrets }}
+          imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 12 }}
+          {{- end }}
+          {{- if .Values.priorityClassName }}
+          priorityClassName: "{{ .Values.priorityClassName }}"
+          {{- end }}
+          {{- if .Values.securityContext.enabled }}
+          securityContext:
+            fsGroup: {{ .Values.securityContext.fsGroup }}
+            runAsUser: {{ .Values.securityContext.runAsUser }}
+          {{- end }}
+          containers:
+            - name: {{ .Chart.Name }}
+              image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+              imagePullPolicy: {{ .Values.image.pullPolicy }}
+              command:
+              - /bin/registry
+              - garbage-collect
+              - --delete-untagged={{ .Values.garbageCollect.deleteUntagged }}
+              - /etc/docker/registry/config.yml
+              env: {{ include "docker-registry.envs" . | nindent 16 }}
+              volumeMounts: {{ include "docker-registry.volumeMounts" . | nindent 16 }}
+          restartPolicy: OnFailure
+          {{- if .Values.nodeSelector }}
+          nodeSelector: {{ toYaml .Values.nodeSelector | nindent 12 }}
+          {{- end }}
+          {{- if .Values.affinity }}
+          affinity: {{ toYaml .Values.affinity | nindent 12 }}
+          {{- end }}
+          {{- if .Values.tolerations }}
+          tolerations: {{ toYaml .Values.tolerations | nindent 12 }}
+          {{- end }}
+          volumes: {{ include "docker-registry.volumes" . | nindent 12 }}
+{{- end }}

+ 95 - 0
docker-registry/templates/deployment.yaml

@@ -0,0 +1,95 @@
+{{/*
+Main registry Deployment: runs `/bin/registry serve` against the rendered
+config, listening on port 5000. Probes switch to HTTPS when tlsSecretName is
+set (the envs helper then configures the registry for TLS). The
+checksum/config and checksum/secret annotations roll the pods whenever the
+configmap or secret contents change.
+*/}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "docker-registry.name" . }}
+      release: {{ .Release.Name }}
+  replicas: {{ .Values.replicaCount }}
+  {{- if .Values.updateStrategy }}
+  strategy: {{ toYaml .Values.updateStrategy | nindent 4 }}
+  {{- end }}
+  minReadySeconds: 5
+  template:
+    metadata:
+      labels:
+        app: {{ template "docker-registry.name" . }}
+        release: {{ .Release.Name }}
+        {{- with .Values.podLabels }}
+        {{ toYaml . | nindent 8 }}
+        {{- end }}
+      annotations:
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+        {{- if .Values.podAnnotations }}
+        {{ toYaml .Values.podAnnotations | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- if or (eq .Values.serviceAccount.create true) (ne .Values.serviceAccount.name "") }}
+      serviceAccountName: {{ .Values.serviceAccount.name | default (include "docker-registry.fullname" .) }}
+      {{- end }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
+      {{- end }}
+      {{- if .Values.priorityClassName }}
+      priorityClassName: "{{ .Values.priorityClassName }}"
+      {{- end }}
+      {{- if .Values.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+        runAsUser: {{ .Values.securityContext.runAsUser }}
+      {{- end }}
+      {{- with .Values.initContainers }}
+      initContainers:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          command:
+          - /bin/registry
+          - serve
+          - /etc/docker/registry/config.yml
+          ports:
+            - containerPort: 5000
+            {{- if .Values.metrics.enabled }}
+            {{- /* metrics port is the port half of configData.http.debug.addr ("host:port") */}}
+            - containerPort: {{ (split ":" .Values.configData.http.debug.addr)._1 }}
+              name: http-metrics
+              protocol: TCP
+            {{- end }}
+          livenessProbe:
+            httpGet:
+              {{- if .Values.tlsSecretName }}
+              scheme: HTTPS
+              {{- end }}
+              path: /
+              port: 5000
+          readinessProbe:
+            httpGet:
+              {{- if .Values.tlsSecretName }}
+              scheme: HTTPS
+              {{- end }}
+              path: /
+              port: 5000
+          resources: {{ toYaml .Values.resources | nindent 12 }}
+          env: {{ include "docker-registry.envs" . | nindent 12 }}
+          volumeMounts: {{ include "docker-registry.volumeMounts" . | nindent 12 }}
+      {{- if .Values.nodeSelector }}
+      nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }}
+      {{- end }}
+      {{- if .Values.affinity }}
+      affinity: {{ toYaml .Values.affinity | nindent 8 }}
+      {{- end }}
+      {{- if .Values.tolerations }}
+      tolerations: {{ toYaml .Values.tolerations | nindent 8 }}
+      {{- end }}
+      volumes: {{ include "docker-registry.volumes" . | nindent 8 }}

+ 42 - 0
docker-registry/templates/hpa.yaml

@@ -0,0 +1,42 @@
+{{- /*
+HorizontalPodAutoscaler (autoscaling/v2) for the registry Deployment.
+Rendered only when autoscaling is enabled AND the cluster serves the
+autoscaling/v2 API; otherwise hpaV1.yaml renders the legacy v1 fallback.
+*/}}
+{{- if .Values.autoscaling.enabled }}
+{{- $apiVersions := .Capabilities.APIVersions -}}
+{{- if $apiVersions.Has "autoscaling/v2" }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ template "docker-registry.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+{{- /* Each target metric is optional; an entry is emitted only when its value is set. */}}
+{{- with .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ . }}
+{{- end }}
+{{- with .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ . }}
+{{- end }}
+{{- with .Values.autoscaling.behavior }}
+  behavior:
+    {{- toYaml . | nindent 4 }}
+{{- end }}
+{{- end }}
+{{- end }}

+ 22 - 0
docker-registry/templates/hpaV1.yaml

@@ -0,0 +1,22 @@
+{{- /*
+Fallback HorizontalPodAutoscaler (autoscaling/v1) for clusters that do NOT
+serve the autoscaling/v2 API. NOTE: v1 supports only a CPU utilization
+target, so autoscaling.targetMemoryUtilizationPercentage and
+autoscaling.behavior from values.yaml are ignored on this path.
+*/}}
+{{- if .Values.autoscaling.enabled }}
+{{- $apiVersions := .Capabilities.APIVersions -}}
+{{- if not ($apiVersions.Has "autoscaling/v2") }}
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ template "docker-registry.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  targetCPUUtilizationPercentage: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+{{- end }}
+{{- end }}

+ 50 - 0
docker-registry/templates/ingress.yaml

@@ -0,0 +1,50 @@
+{{- if .Values.ingress.enabled -}}
+{{- /*
+Ingress exposing the registry Service on the configured hosts/path.
+Uses networking.k8s.io/v1 when the cluster serves it and falls back to the
+legacy v1beta1 serviceName/servicePort backend form otherwise.
+*/}}
+{{- $apiVersions := .Capabilities.APIVersions -}}
+{{- $serviceName := include "docker-registry.fullname" . -}}
+{{- $servicePort := .Values.service.port -}}
+{{- $path := .Values.ingress.path -}}
+apiVersion: {{- if $apiVersions.Has "networking.k8s.io/v1" }} networking.k8s.io/v1 {{- else }} networking.k8s.io/v1beta1 {{- end }}
+kind: Ingress
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- if .Values.ingress.labels }}
+{{ toYaml .Values.ingress.labels | indent 4 }}
+{{- end }}
+  annotations:
+    {{- range $key, $value := .Values.ingress.annotations }}
+      {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+{{- /* Only emit ingressClassName when a class is actually configured; an
+       unconditional line would render an invalid empty value when
+       ingress.className is unset. Field exists only in the v1 API. */}}
+{{- if and ($apiVersions.Has "networking.k8s.io/v1") .Values.ingress.className }}
+  ingressClassName: {{ .Values.ingress.className }}
+{{- end }}
+  rules:
+    {{- range $host := .Values.ingress.hosts }}
+    - host: {{ $host }}
+      http:
+        paths:
+          - path: {{ $path }}
+{{- if $apiVersions.Has "networking.k8s.io/v1" }}
+            pathType: Prefix
+            backend:
+              service:
+                name: {{ $serviceName }}
+                port:
+                  number: {{ $servicePort }}
+{{- else }}
+            backend:
+              serviceName: {{ $serviceName }}
+              servicePort: {{ $servicePort }}
+{{- end }}
+    {{- end -}}
+  {{- if .Values.ingress.tls }}
+  tls:
+{{ toYaml .Values.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}

+ 18 - 0
docker-registry/templates/poddisruptionbudget.yaml

@@ -0,0 +1,18 @@
+{{- if .Values.podDisruptionBudget -}}
+{{- /*
+PodDisruptionBudget limiting voluntary disruption of registry pods.
+.Values.podDisruptionBudget supplies the spec body (maxUnavailable /
+minAvailable). policy/v1beta1 was removed in Kubernetes 1.25, so prefer
+policy/v1 whenever the cluster serves it (same capability-gating pattern
+as ingress.yaml).
+*/}}
+apiVersion: {{- if .Capabilities.APIVersions.Has "policy/v1" }} policy/v1 {{- else }} policy/v1beta1 {{- end }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "docker-registry.name" . }}
+      release: {{ .Release.Name }}
+{{ toYaml .Values.podDisruptionBudget | indent 2 }}
+{{- end -}}

+ 17 - 0
docker-registry/templates/prometheusrules.yaml

@@ -0,0 +1,17 @@
+{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled -}}
+{{- /*
+Prometheus-operator PrometheusRule carrying the alerting rules from
+metrics.prometheusRule.rules. Requires both metrics.enabled and
+metrics.prometheusRule.enabled. Namespace is pinned the same way as the
+chart's other templates for consistency.
+*/}}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app.kubernetes.io/component: controller
+  {{- if .Values.metrics.prometheusRule.labels }}
+    {{- toYaml .Values.metrics.prometheusRule.labels | nindent 4 }}
+  {{- end }}
+spec:
+{{- if .Values.metrics.prometheusRule.rules }}
+  groups:
+  - name: {{ template "docker-registry.fullname" . }}
+    rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 4 }}
+{{- end }}
+{{- end }}

+ 27 - 0
docker-registry/templates/pvc.yaml

@@ -0,0 +1,27 @@
+{{- /*
+PersistentVolumeClaim backing registry image storage. Skipped entirely when
+persistence is disabled, or when the user supplies a pre-existing claim via
+persistence.existingClaim. A storageClass of "-" explicitly requests
+storageClassName: "" (disables dynamic provisioning); leaving storageClass
+unset falls back to the cluster default class.
+NOTE(review): the "app" label here uses the chart fullname while the other
+templates use the short name — confirm nothing selects on it.
+*/}}
+{{- if .Values.persistence.enabled }}
+{{- if not .Values.persistence.existingClaim -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.fullname" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end -}}

+ 40 - 0
docker-registry/templates/secret.yaml

@@ -0,0 +1,40 @@
+{{- /*
+Opaque Secret for the registry: optional htpasswd auth data, the HA shared
+secret, credentials for the storage backend selected by .Values.storage
+(azure / s3 / swift), and proxy credentials (empty strings when unset).
+NOTE(review): when secrets.haSharedSecret is empty, randAlphaNum generates a
+fresh value on every render, so every `helm upgrade` rotates the shared
+secret — confirm this churn is acceptable for HA deployments.
+*/}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "docker-registry.fullname" . }}-secret
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+type: Opaque
+data:
+  {{- if .Values.secrets.htpasswd }}
+  htpasswd: {{ .Values.secrets.htpasswd | b64enc }}
+  {{- end }}
+  {{- if .Values.secrets.haSharedSecret }}
+  haSharedSecret: {{ .Values.secrets.haSharedSecret | b64enc | quote }}
+  {{- else }}
+  haSharedSecret: {{ randAlphaNum 16 | b64enc | quote }}
+  {{- end }}
+  
+  {{- /* Backend credentials are only emitted when every required field for the chosen backend is present. */}}
+  {{- if eq .Values.storage "azure" }}
+    {{- if and .Values.secrets.azure.accountName .Values.secrets.azure.accountKey .Values.secrets.azure.container }}
+  azureAccountName: {{ .Values.secrets.azure.accountName | b64enc | quote }}
+  azureAccountKey: {{ .Values.secrets.azure.accountKey | b64enc | quote }}
+  azureContainer: {{ .Values.secrets.azure.container | b64enc | quote }}
+    {{- end }}
+  {{- else if eq .Values.storage "s3" }}
+    {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }}
+  s3AccessKey: {{ .Values.secrets.s3.accessKey | b64enc | quote }}
+  s3SecretKey: {{ .Values.secrets.s3.secretKey | b64enc | quote }}
+    {{- end }}
+  {{- else if eq .Values.storage "swift" }}
+    {{- if and .Values.secrets.swift.username .Values.secrets.swift.password }}
+  swiftUsername: {{ .Values.secrets.swift.username | b64enc | quote }}
+  swiftPassword: {{ .Values.secrets.swift.password | b64enc | quote }}
+    {{- end }}
+  {{- end }}
+  proxyUsername: {{ .Values.proxy.username | default "" | b64enc | quote }}
+  proxyPassword: {{ .Values.proxy.password | default "" | b64enc | quote }}

+ 49 - 0
docker-registry/templates/service.yaml

@@ -0,0 +1,49 @@
+{{- /*
+Service fronting the registry: .Values.service.port -> container port 5000,
+plus an optional http-metrics port targeting the debug listener when
+metrics are enabled. Supports ClusterIP, NodePort and LoadBalancer types
+with their respective optional fields.
+*/}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "docker-registry.fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- if .Values.service.annotations }}
+  annotations:
+{{ toYaml .Values.service.annotations | indent 4 }}
+{{- end }}
+spec:
+  type: {{ .Values.service.type }}
+{{- if (and (eq .Values.service.type "ClusterIP") (not (empty .Values.service.clusterIP))) }}
+  clusterIP: {{ .Values.service.clusterIP }}
+{{- end }}
+{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+{{- end }}
+{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges))) }}
+  {{- /* Render the list with toYaml: templating the slice directly emits
+         Go's "[a b]" form, which is invalid YAML once there is more than
+         one CIDR range. */}}
+  loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }}
+{{- end }}
+{{- if .Values.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.service.sessionAffinity }}
+  {{- if .Values.service.sessionAffinityConfig }}
+  sessionAffinityConfig:
+    {{ toYaml .Values.service.sessionAffinityConfig | nindent 4 }}
+  {{- end -}}
+{{- end }}
+  ports:
+    - port: {{ .Values.service.port }}
+      protocol: TCP
+      name: {{ if .Values.tlsSecretName }}https{{ else }}http{{ end }}-{{ .Values.service.port }}
+      targetPort: 5000
+{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
+      nodePort: {{ .Values.service.nodePort }}
+{{- end }}
+{{- if .Values.metrics.enabled }}
+    {{- /* Metrics target port is the port half of configData.http.debug.addr (e.g. ":5001"). */}}
+    - port: {{ .Values.metrics.port }}
+      protocol: TCP
+      name: http-metrics
+      targetPort: {{ (split ":" .Values.configData.http.debug.addr)._1 }}
+{{- end }}
+  selector:
+    app: {{ template "docker-registry.name" . }}
+    release: {{ .Release.Name }}

+ 20 - 0
docker-registry/templates/serviceaccount.yaml

@@ -0,0 +1,20 @@
+{{- /*
+ServiceAccount for the registry pods, created only when
+serviceAccount.create is true. An explicit serviceAccount.name overrides
+the generated chart fullname; optional annotations (e.g. IAM role bindings)
+are passed through verbatim.
+*/}}
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app: {{ template "docker-registry.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+{{- if .Values.serviceAccount.name }}
+  name: {{ .Values.serviceAccount.name }}
+{{- else  }}
+  name: {{ include "docker-registry.fullname" . }}
+{{- end }}
+{{- if .Values.serviceAccount.annotations }}
+  annotations:
+{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
+{{- end }}
+{{- end -}}

+ 21 - 0
docker-registry/templates/servicemonitor.yaml

@@ -0,0 +1,21 @@
+{{- /*
+Prometheus-operator ServiceMonitor scraping the "http-metrics" Service port
+every 15s. Requires both metrics.enabled and metrics.serviceMonitor.enabled.
+NOTE(review): unlike the sibling templates this one sets no namespace and
+does not honor .Values.namespace — confirm the operator discovers it in the
+release namespace.
+*/}}
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ template "docker-registry.fullname" . }}-servicemonitor
+  labels:
+    app: {{ template "docker-registry.name" . }}-metrics
+    release: {{ .Release.Name }}
+{{- if .Values.metrics.serviceMonitor.labels }}
+{{ toYaml .Values.metrics.serviceMonitor.labels | indent 4 }}
+{{- end }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "docker-registry.name" . }}
+      release: {{ .Release.Name }}
+      heritage: {{ .Release.Service }}
+  endpoints:
+  - port: http-metrics
+    interval: 15s
+{{- end }}

+ 231 - 0
docker-registry/values.yaml

@@ -0,0 +1,231 @@
+# Default values for docker-registry.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+
+updateStrategy: {}
+  # type: RollingUpdate
+  # rollingUpdate:
+  #   maxSurge: 1
+  #   maxUnavailable: 0
+
+podAnnotations: {}
+podLabels: {}
+
+serviceAccount:
+  create: false
+  name: ""
+  annotations: {}
+
+image:
+  repository: registry
+  tag: latest
+  pullPolicy: IfNotPresent
+# imagePullSecrets:
+    # - name: docker
+service:
+  name: registry
+  type: ClusterIP
+  # sessionAffinity: None
+  # sessionAffinityConfig: {}
+  # clusterIP:
+  port: 5000
+    #nodePort: 31010
+  # loadBalancerIP:
+  # loadBalancerSourceRanges:
+  annotations: {}
+  # foo.io/bar: "true"
+ingress:
+  enabled: true
+  className: nginx
+  path: /
+  # Used to create an Ingress record.
+  hosts:
+    - reg.cestong.com.cn
+  annotations: 
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+  labels: {}
+  tls:
+    # Secrets must be manually created in the namespace.
+    - secretName: cecf-tls-secret
+      hosts:
+        - reg.cestong.com.cn
+resources:
+  # Resource requests/limits for the registry container. The defaults below
+  # are tuned for this deployment; adjust them to the capacity of the target
+  # cluster, or delete the limits/requests blocks to leave the container
+  # unconstrained (recommended for constrained environments like Minikube).
+  limits:
+   cpu: 400m
+   memory: 2Gi
+  requests:
+   cpu: 200m
+   memory: 1Gi
+persistence:
+  accessMode: 'ReadWriteOnce'
+  enabled: true
+  size: 30Gi
+  storageClass: 'openebs-hostpath'
+
+# set the type of filesystem to use: filesystem, s3
+storage: filesystem
+
+# Set this to name of secret for tls certs
+# tlsSecretName: registry.docker.example.com
+secrets:
+  haSharedSecret: ""
+  htpasswd: ""
+# Secrets for Azure
+#   azure:
+#     accountName: ""
+#     accountKey: ""
+#     container: ""
+# Secrets for S3 access and secret keys
+# Use a secretRef with keys (accessKey, secretKey) for secrets stored outside the chart
+#   s3:
+#     secretRef: ""
+#     accessKey: ""
+#     secretKey: ""
+# Secrets for Swift username and password
+#   swift:
+#     username: ""
+#     password: ""
+
+# Options for s3 storage type:
+# s3:
+#  region: us-east-1
+#  regionEndpoint: s3.us-east-1.amazonaws.com
+#  bucket: my-bucket
+#  rootdirectory: /object/prefix
+#  encrypt: false
+#  secure: true
+
+# Options for swift storage type:
+# swift:
+#  authurl: http://swift.example.com/
+#  container: my-container
+
+# https://docs.docker.com/registry/recipes/mirror/
+proxy:
+  enabled: false
+  remoteurl: https://registry-1.docker.io
+  username: ""
+  password: ""
+  # the ref for a secret stored outside of this chart
+  # Keys: proxyUsername, proxyPassword
+  secretRef: ""
+
+metrics:
+  enabled: false
+  port: 5001
+  # Create a prometheus-operator servicemonitor
+  serviceMonitor:
+    enabled: false
+    labels: {}
+  # prometheus-operator PrometheusRule defining alerting rules for a Prometheus instance
+  prometheusRule:
+    enabled: false
+    labels: {}
+    rules: {}
+
+configData:
+  version: 0.1
+  validation:
+    disabled: true
+  log:
+    fields:
+      service: registry
+  storage:
+    cache:
+      blobdescriptor: inmemory
+  http:
+    addr: :5000
+    headers:
+      X-Content-Type-Options: [nosniff]
+    debug:
+      addr: :5001
+      prometheus:
+        enabled: false
+        path: /metrics
+  health:
+    storagedriver:
+      enabled: true
+      interval: 10s
+      threshold: 3
+
+securityContext:
+  enabled: true
+  runAsUser: 1000
+  fsGroup: 1000
+
+priorityClassName: ""
+
+podDisruptionBudget: {}
+  # maxUnavailable: 1
+  # minAvailable: 2
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 2
+  targetCPUUtilizationPercentage: 60
+  targetMemoryUtilizationPercentage: 60 # requires the "autoscaling/v2" API (Kubernetes >= 1.23)
+  behavior: {} # requires the "autoscaling/v2" API (Kubernetes >= 1.23)
+#   scaleDown:
+#     stabilizationWindowSeconds: 300
+#     policies:
+#     - type: Percent
+#       value: 100
+#       periodSeconds: 15
+#   scaleUp:
+#     stabilizationWindowSeconds: 0
+#     policies:
+#     - type: Percent
+#       value: 100
+#       periodSeconds: 15
+#     - type: Pods
+#       value: 4
+#       periodSeconds: 15
+#     selectPolicy: Max
+
+nodeSelector:
+  kubernetes.io/hostname: k8sw3
+
+affinity: {}
+
+tolerations: []
+
+extraVolumeMounts: []
+## Additional volumeMounts to the registry container.
+#  - mountPath: /secret-data
+#    name: cloudfront-pem-secret
+#    readOnly: true
+
+extraVolumes: []
+## Additional volumes to the pod.
+#  - name: cloudfront-pem-secret
+#    secret:
+#      secretName: cloudfront-credentials
+#      items:
+#        - key: cloudfront.pem
+#          path: cloudfront.pem
+#          mode: 511
+
+extraEnvVars: []
+## Additional ENV variables to set
+# - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
+#   value: "/var/lib/example"
+
+initContainers: []
+## Init containers to add to the Deployment
+# - name: init
+#   image: busybox
+#   command: []
+
+garbageCollect:
+  enabled: false
+  deleteUntagged: true
+  schedule: "0 1 * * *"

+ 0 - 48
grafana/templates/quota.yml

@@ -1,48 +0,0 @@
-apiVersion: v1
-kind: List
-items:
-- apiVersion: v1
-  kind: ResourceQuota
-  metadata:
-    name: pods-high
-  spec:
-    hard:
-      cpu: "1000"
-      memory: 200Gi
-      pods: "10"
-    scopeSelector:
-      matchExpressions:
-      - operator: In
-        scopeName: PriorityClass
-        values: ["high"]
-- apiVersion: v1
-  kind: ResourceQuota
-  metadata:
-    name: pods-medium
-  spec:
-    hard:
-      cpu: "10"
-      memory: 20Gi
-      pods: "10"
-    scopeSelector:
-      matchExpressions:
-      - operator: In
-        scopeName: PriorityClass
-        values: ["medium"]
-- apiVersion: v1
-  kind: ResourceQuota
-  metadata:
-    name: pods-low
-  spec:
-    hard:
-      cpu: "5"
-      memory: 10Gi
-      pods: "10"
-    scopeSelector:
-      matchExpressions:
-      - operator: In
-        scopeName: PriorityClass
-        values: ["low"]
-
-
-

+ 4 - 4
grafana/values.yaml

@@ -39,7 +39,7 @@ serviceAccount:
 #    eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
   autoMount: true
 
-replicas: 20
+replicas: 1
 
 ## Create a headless service for the deployment
 headlessService: false
@@ -134,7 +134,7 @@ extraEmptyDirMounts: []
 extraLabels: {}
 
 ## Assign a PriorityClassName to pods if set
-priorityClassName: pods-high
+#priorityClassName: pods-high
 
 downloadDashboardsImage:
   repository: curlimages/curl
@@ -233,7 +233,7 @@ ingress:
   pathType: Prefix
 
   hosts:
-    - grafana.cecf.base
+    - grafana.cestong.com.cn
   ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
   extraPaths: []
   # - path: /*
@@ -261,7 +261,7 @@ resources:
 #    memory: 128Mi
   requests:
     cpu: 500m
-    memory: 10Gi
+    memory: 2Gi
 
 ## Node labels for pod assignment
 ## ref: https://kubernetes.io/docs/user-guide/node-selection/

+ 14 - 5
ingress-nginx/ingress-nginx/values.yaml

@@ -95,9 +95,9 @@ controller:
     enabled: true
     ports:
       # -- 'hostPort' http port
-      http: 8580
+      http: 80
       # -- 'hostPort' https port
-      https: 8534
+      https: 443
 
   # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader'
   electionID: ""
@@ -191,7 +191,7 @@ controller:
   #         name: secret-resource
 
   # -- Use a `DaemonSet` or `Deployment`
-  kind: DaemonSet
+  kind: Deployment
 
   # -- Annotations to be added to the controller Deployment or DaemonSet
   ##
@@ -229,7 +229,16 @@ controller:
   # -- Affinity and anti-affinity rules for server scheduling to nodes
   ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
   ##
-  affinity: {}
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: kubernetes.io/hostname
+            operator: In
+            values:
+            - k8sw3
+            - k8sw4
     # # An example of preferred pod anti-affinity, weight is in the range 1-100
     # podAntiAffinity:
     #   preferredDuringSchedulingIgnoredDuringExecution:
@@ -343,7 +352,7 @@ controller:
   ##
   podAnnotations: {}
 
-  replicaCount: 1
+  replicaCount: 2
 
   minAvailable: 1
 

+ 23 - 0
kafka/.helmignore

@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 25 - 0
kafka/Chart.yaml

@@ -0,0 +1,25 @@
+apiVersion: v2
+appVersion: 7.3.0
+dependencies:
+- alias: zookeeper
+  condition: zookeeper.enabled
+  name: zookeeper
+  repository: https://ricardo-aires.github.io/helm-charts/
+  version: 0.2.0
+description: A Helm chart for Confluent Kafka on Kubernetes
+home: https://github.com/ricardo-aires/helm-charts
+icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/kafka.svg
+keywords:
+- kafka
+- confluent
+maintainers:
+- email: ricardoaireshenriques@gmail.com
+  name: Ricardo Henriques
+  url: https://github.com/ricardo-aires
+name: kafka
+sources:
+- https://github.com/ricardo-aires/helm-charts
+- https://kafka.apache.org/documentation/#configuration
+- https://docs.confluent.io/platform/current/installation/configuration/broker-configs.html
+type: application
+version: 0.2.0

+ 226 - 0
kafka/README.md

@@ -0,0 +1,226 @@
+# kafka
+
+A Helm chart for Confluent Kafka on Kubernetes
+
+## Introduction
+
+This chart bootstraps a [Kafka Cluster](https://kafka.apache.org) using the [Confluent](https://docs.confluent.io/home/kafka-intro.html) stable version.
+
+[Kafka](https://kafka.apache.org) is an open-source distributed event streaming platform that:
+
+- Publishes and subscribes to streams of records, similar to a message queue or enterprise messaging system.
+- Stores streams of records in a fault-tolerant durable way.
+- Processes streams of records as they occur.
+
+## Developing Environment
+
+| component                                                                      | version |
+| ------------------------------------------------------------------------------ | ------- |
+| [Podman](https://docs.podman.io/en/latest/)                                    | v4.3.1  |
+| [Minikube](https://minikube.sigs.k8s.io/docs/)                                 | v1.28.0 |
+| [Kubernetes](https://kubernetes.io)                                            | v1.25.3 |
+| [Helm](https://helm.sh)                                                        | v3.10.2 |
+| [Confluent Platform](https://docs.confluent.io/platform/current/overview.html) | v7.3.0  |
+
+## Installing the Chart
+
+Add the [chart repository](https://helm.sh/docs/helm/helm_repo_add/), if not done before:
+
+```command
+helm repo add rhcharts https://ricardo-aires.github.io/helm-charts/
+```
+
+To [install](https://helm.sh/docs/helm/helm_install/) the chart with the release name `kafka`:
+
+```console
+$ helm upgrade --install kafka rhcharts/kafka
+Release "kafka" does not exist. Installing it now.
+NAME: kafka
+LAST DEPLOYED: Tue Nov 22 10:30:47 2022
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+NOTES:
+** Please be patient while the kafka chart is being deployed in release kafka **
+
+This chart bootstraps a Kafka Cluster made of "3" brokers using the Confluent stable version that can be accessed from within your cluster:
+
+    kafka-headless.default:9092
+
+More info:
+https://ricardo-aires.github.io/helm-charts/charts/kafka/
+$
+```
+
+By default, it will also install the [zookeeper](https://github.com/ricardo-aires/helm-charts/tree/main/charts/zookeeper).
+
+> If an external Zookeeper Ensemble is to be used turn `zookeeper.enabled` to `false` and include the `zookeeper.url`.
+
+These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+The chart will create the next resources, by default:
+
+![Kafka](./img/kafka.png)
+
+1. A [PodDisruptionBudget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) to ensure service availability during planned maintenance.
+1. A [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) to control the internal listener for the Kafka.
+1. A [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) which contains 3 Kafka Brokers [Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/), by default.
+
+One can run the:
+
+- [helm list](https://helm.sh/docs/helm/helm_list/) command to list releases installed
+- [helm status](https://helm.sh/docs/helm/helm_status/) to display the status of the named release
+- [helm test](https://helm.sh/docs/helm/helm_test/) to run tests for a release
+
+To [uninstall](https://helm.sh/docs/helm/helm_uninstall/) the `kafka` deployment run:
+
+```console
+helm uninstall kafka
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+> Keep in mind that the [PersistentVolumeClaims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) are in retain.
+
+## Parameters
+
+You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm upgrade --install kafka -f my-values.yaml rhcharts/kafka
+```
+
+A default [values.yaml](./values.yaml) is available and should be checked for more advanced usage.
+
+### Image
+
+By default the [confluentinc/cp-kafka](https://hub.docker.com/r/confluentinc/cp-kafka) is in use.
+
+| Parameter          | Description                                   | Default                     |
+| ------------------ | --------------------------------------------- | --------------------------- |
+| `image.registry`   | Registry used to distribute the Docker Image. | `docker.io`                 |
+| `image.repository` | Docker Image of Confluent Kafka.              | `confluentinc/cp-kafka` |
+| `image.tag`        | Docker Image Tag of Confluent Kafka.          | `7.3.0`                     |
+
+One can easily change the `image.tag` to use another version. When using a local/proxy docker registry we must change `image.registry` as well.
+
+### Kafka Cluster
+
+The configuration parameters in this section control the resources requested and utilized by the kafka chart.
+
+| Parameter      | Description                  | Default |
+| -------------- | ---------------------------- | ------- |
+| `replicaCount` | The number of Kafka Brokers. | `3`     |
+
+> The value for the [PodDisruptionBudget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) is always `maxUnavailable` equals to `1`.
+
+### Confluent Kafka Broker Configuration
+
+The next configuration related to Kafka Broker are available:
+
+| Parameter                       | Description                                                                                                                                     | Default                 |
+| ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `autoCreateTopicsEnable`        | Enable auto creation of topic on the server.                                                                                                    | `false`                 |
+| `deleteTopicEnable`             | Delete topic through the admin tool will have no effect if this config is turned off.                                                           | `true`                  |
+| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic.                                                                                                   | `3`                     |
+| `numPartitions`                 | The default number of log partitions per topic.                                                                                                 | `3`                     |
+| `defaultReplicationFactor`      | The default replication factors for automatically created topics.                                                                               | `3`                     |
+| `minInsyncReplicas`             | The minimum number of replicas that must acknowledge a write for the write to be considered successful.                                         | `2`                     |
+| `uncleanLeaderElectionEnable`   | Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss. | `false`                  |
+| `logFlushIntervalMessages`      | The number of messages accumulated on a log partition before messages are flushed to disk                                                       | `10000`                 |
+| `logFlushIntervalMs`            | The maximum time in ms that a message in any topic is kept in memory before flushed to disk.                                                    | `1000`                  |
+| `logRetentionBytes`             | The maximum size of the log before deleting it.                                                                                                 | `1073741824`            |
+| `logRetentionCheckIntervalMs`   | The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion.                                             | `300000`                |
+| `logRetentionHours`             | The number of hours to keep a log file before deleting it.                                                                                      | `168`                   |
+| `logSegmentBytes`               | The maximum size of a single log file.                                                                                                          | `1073741824`            |
+| `messageMaxBytes`               | The largest record batch size allowed by Kafka (after compression if compression is enabled).                                                   | `1048588`               |
+
+More information can be found in the [Apache Kafka Documentation](https://kafka.apache.org/documentation/#brokerconfigs) and in the [Confluent Documentation](https://docs.confluent.io/platform/current/installation/configuration/broker-configs.html).
+
+### Ports used by Kafka
+
+For those still struggling with how the listeners work take a look at [Kafka Listeners - Explained](https://rmoff.net/2018/08/02/kafka-listeners-explained/) by [Robin Moffatt](https://twitter.com/rmoff/).
+
+By default the [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) will expose the pods in the port `9092`, `port.kafkaInternal`, and one can use this headless service as a Bootstrap Server.
+
+We have setup the possibility for external access by changing the next values:
+
+```yaml
+externalAccess:
+  enabled: true
+  initNodePort: 32400
+## set to true to enable NodePort access when running under Docker Desktop
+isDocker: true
+```
+
+This will create a [nodeport service](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport) that will expose each broker in a different port. So, if we have the `32400` as a start and we have `3` replicas we will have:
+
+| pod       | map              |
+| --------- | -----------------|
+| `kafka-0` | `9094:32400/TCP` |
+| `kafka-1` | `9094:32401/TCP` |
+| `kafka-2` | `9094:32402/TCP` |
+
+### Kerberos Authentication
+
+This chart is prepared to enable [Kerberos authentication in Kafka](https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_gssapi.html#brokers)
+
+| Parameter               | Description                                | Default |
+| ----------------------- | ------------------------------------------ | ------- |
+| `kerberos.enabled`      | Boolean to control if Kerberos is enabled. | `false` |
+| `kerberos.krb5Conf`     | Name of the [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) that stores the `krb5.conf`, Kerberos [Configuration file](https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html) | `nil`**¹** |
+| `kerberos.keyTabSecret` | Name of the [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) that stores the [Keytab](https://web.mit.edu/kerberos/krb5-1.19/doc/basic/keytab_def.html) | `nil`**¹** |
+| `kerberos.jaasConf`     | Name of the [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) that stores the JAAS configuration files per host.  | `nil`**¹** |
+| `kerberos.testUserKeytabSecret` | Name of the [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) that stores the [Keytab](https://web.mit.edu/kerberos/krb5-1.19/doc/basic/keytab_def.html) for the test user. Mandatory when `kerberos.testUser` is set | `nil` |
+
+> **¹** When `kerberos.enabled` these parameters are required, and the [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) and [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) need to exist beforehand.
+
+### Authorization Using ACLs
+
+This chart is prepared to enable [Authorization using ACLs in Kafka](https://docs.confluent.io/platform/current/kafka/authorization.html) but doesn't manage ACLs.
+
+> To enable ACLs, an authentication mechanism must also be enabled, e.g. Kerberos.
+
+| Parameter      | Description                            | Default |
+| -------------- | -------------------------------------- | ------- |
+| `acls.enabled` | Boolean to control if ACLs are enabled | `false` |
+
+### Data Persistence
+
+The Kafka Data directory can be tweaked with:
+
+| Parameter           | Description                                         | Default |
+| ------------------- | --------------------------------------------------- | ------- |
+| `data.storageClass` | Valid options: `nil`, `"-"`, or storage class name. | `nil`   |
+| `data.storageSize`  | Size for data dir.                                  | `10Gi`  |
+
+This will allow the creation of a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) using a specific [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/). However, pay attention to the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) supported by the chosen storage class.
+
+### Resources for Containers
+
+Regarding the management of [Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) the next defaults regarding requests and limits are set:
+
+| Parameter                   | Description                                                             | Default  |
+| --------------------------- | ----------------------------------------------------------------------- | -------- |
+| `resources.limits.cpu`      | a container cannot use more CPU than the configured limit               | `1`      |
+| `resources.limits.memory`   | a container cannot use more Memory than the configured limit            | `1400Mi` |
+| `resources.requests.cpu`    | a container is guaranteed to be allocated as much CPU as it requests    | `250m`   |
+| `resources.requests.memory` | a container is guaranteed to be allocated as much Memory as it requests | `512Mi`  |
+
+In terms of the JVM the next default is set:
+
+| Parameter  | Description                            | Default                                                     |
+| ---------- | -------------------------------------- | ----------------------------------------------------------- |
+| `heapOpts` | The JVM Heap Options for Kafka Broker. | `"-XX:MaxRAMPercentage=75.0 -XX:InitialRAMPercentage=50.0"` |
+
+### Advance Configuration
+
+Check the `values.yaml` for more advance configuration such as:
+
+- [Liveness and Readiness Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)
+- [Pod Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
+- [Container Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container)

+ 23 - 0
kafka/charts/zookeeper/.helmignore

@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 20 - 0
kafka/charts/zookeeper/Chart.yaml

@@ -0,0 +1,20 @@
+apiVersion: v2
+appVersion: 7.3.0
+description: A Helm chart for Confluent Zookeeper on Kubernetes
+home: https://github.com/ricardo-aires/helm-charts/tree/main/charts/zookeeper
+icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/zookeeper.svg
+keywords:
+- zookeeper
+- confluent
+maintainers:
+- email: ricardoaireshenriques@gmail.com
+  name: Ricardo Henriques
+  url: https://github.com/ricardo-aires
+name: zookeeper
+sources:
+- https://github.com/ricardo-aires/helm-charts
+- https://docs.confluent.io/platform/current/zookeeper/deployment.html
+- https://zookeeper.apache.org/doc/current/index.html
+- https://hub.docker.com/r/confluentinc/cp-zookeeper
+type: application
+version: 0.2.0

+ 198 - 0
kafka/charts/zookeeper/README.md

@@ -0,0 +1,198 @@
+# zookeeper
+
+A Helm chart for Confluent Zookeeper on Kubernetes
+
+## Introduction
+
+This chart bootstraps an ensemble [Apache Zookeeper](https://zookeeper.apache.org) Servers using the [Confluent](https://docs.confluent.io/platform/current/zookeeper/deployment.html) stable version.
+
+[ZooKeeper](https://zookeeper.apache.org) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services, used as a backend by distributed applications.
+
+## Developing Environment
+
+| component                                                                      | version |
+| ------------------------------------------------------------------------------ | ------- |
+| [Podman](https://docs.podman.io/en/latest/)                                    | v4.3.1  |
+| [Minikube](https://minikube.sigs.k8s.io/docs/)                                 | v1.28.0 |
+| [Kubernetes](https://kubernetes.io)                                            | v1.25.3 |
+| [Helm](https://helm.sh)                                                        | v3.10.2 |
+| [Confluent Platform](https://docs.confluent.io/platform/current/overview.html) | v7.3.0  |
+| [Zookeeper](https://zookeeper.apache.org/doc/r3.6.3/index.html)                | v3.6.3  |
+
+## Installing the Chart
+
+Add the [chart repository](https://helm.sh/docs/helm/helm_repo_add/), if not done before:
+
+```shell
+helm repo add rhcharts https://ricardo-aires.github.io/helm-charts/
+```
+
+To [install](https://helm.sh/docs/helm/helm_install/) the chart with the release name `zkp`:
+
+```console
+$ helm upgrade --install zkp rhcharts/zookeeper
+Release "zkp" does not exist. Installing it now.
+NAME: zkp
+LAST DEPLOYED: Mon Nov 21 15:49:17 2022
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+NOTES:
+** Please be patient while the zookeeper chart is being deployed in release zkp **
+
+This chart bootstraps an ensemble Apache Zookeeper Servers made of "3" servers using the Confluent stable version that can be accessed from within your cluster:
+
+    zkp-zookeeper-headless.default:2181
+
+To connect to your ZooKeeper server run the following commands:
+
+    $ kubectl exec -it -n default zkp-zookeeper-0 -- zookeeper-shell zkp-zookeeper-headless.default:2181
+
+More info:
+https://ricardo-aires.github.io/helm-charts/charts/zookeeper/
+$
+```
+
+This command deploys ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+The chart will create the next resources:
+
+![Zookeeper](./img/zookeeper.png)
+
+1. A [PodDisruptionBudget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) to ensure service availability during planned maintenance.
+1. A [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) to control the network domain for the ZooKeeper processes  and to expose the [AdminServer](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_adminserver_config).
+1. A [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) which contains 3 Zookeeper [Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/), by default.
+
+One can run the:
+
+- [helm list](https://helm.sh/docs/helm/helm_list/) command to list releases installed
+- [helm status](https://helm.sh/docs/helm/helm_status/) to display the status of the named release
+- [helm test](https://helm.sh/docs/helm/helm_test/) to run tests for a release
+
+This chart uses the `srvr` command of the Zookeeper [Four Letter Words](https://zookeeper.apache.org/doc/r3.6.2/zookeeperAdmin.html#sc_4lw) to check that every Zookeeper Server is responding.
+
+To [uninstall](https://helm.sh/docs/helm/helm_uninstall/) the `zkp` deployment run:
+
+```console
+helm uninstall zkp
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+> Keep in mind that the [PersistentVolumeClaims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) are in retain.
+
+## Parameters
+
+You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+helm upgrade --install zkp -f my-values.yaml rhcharts/zookeeper
+```
+
+A default [values.yaml](./values.yaml) is available and should be checked for more advanced usage.
+
+### Image
+
+By default the [confluentinc/cp-zookeeper](https://hub.docker.com/r/confluentinc/cp-zookeeper) is in use.
+
+| Parameter          | Description                                   | Default                     |
+| ------------------ | --------------------------------------------- | --------------------------- |
+| `image.registry`   | Registry used to distribute the Docker Image. | `docker.io`                 |
+| `image.repository` | Docker Image of Confluent Zookeeper.          | `confluentinc/cp-zookeeper` |
+| `image.tag`        | Docker Image Tag of Confluent Zookeeper.      | `7.3.0`                     |
+
+One can easily change the `image.tag` to use another version. When using a local/proxy docker registry we must change `image.registry` as well.
+
+### Zookeeper Ensemble
+
+The configuration parameters in this section control the resources requested and utilized by the zookeeper chart.
+
+| Parameter      | Description                      | Default |
+| -------------- | -------------------------------- | ------- |
+| `replicaCount` | The number of ZooKeeper servers. | `3`     |
+
+A minimum of three servers are required for a fault tolerant clustered setup, and it is strongly recommended that you have an odd number of servers, because Zookeeper requires a majority.
+
+> The value for the [PodDisruptionBudget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) is calculated automatically from the given `replicaCount`.
+
+### Confluent Zookeeper Configuration
+
+The next configuration related to Zookeeper are available:
+
+| Parameter                   | Description                                                                                                             | Default               |
+| --------------------------- | ----------------------------------------------------------------------------------------------------------------------- | --------------------- |
+| `tickTime`                  | The length of a single tick, which is the basic time unit used by ZooKeeper, as measured in milliseconds.               | `2000`                |
+| `initLimit`                 | Amount of time, in ticks (see `tickTime`), to allow followers to connect and sync to a leader.                          | `10`                  |
+| `syncLimit`                 | Amount of time, in ticks (see `tickTime`), to allow followers to sync with ZooKeeper.                                   | `5`                   |
+| `maxClientCnxns`            | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble. | `60`                  |
+| `autopurge.purgeInterval`   | The time interval in hours for which the purge task has to be triggered.                                                | `24`                  |
+| `autopurge.snapRetainCount` | Number of most recent snapshots and the corresponding transaction logs in the dataDir and dataLogDir to keep.            | `3`                   |
+| `quorumListenOnAllIPs`      | When set to true the ZooKeeper server will listen for connections from its peers on all available IP addresses          | `true`                |
+| `maxSessionTimeout`         | The maximum session timeout in milliseconds that the server will allow the client to negotiate.                         | `40000`               |
+| `adminEnableServer`         | Flag for the [AdminServer](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_adminserver)                 | `true`                |
+| `log4jRootLogLevel`         | Log level of ZooKeeper server                                                                                           |  `INFO`               |
+
+More information can be found in the [Apache Zookeeper Documentation](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_configuration) and in the [Confluent Documentation](https://docs.confluent.io/platform/current/zookeeper/deployment.html).
+
+### Ports used by Zookeeper
+
+ZooKeeper default ports:
+
+| Parameter     | Description                                                                                      | Default |
+| ------------- | ------------------------------------------------------------------------------------------------ | ------- |
+| `port.peers`  | The port on which the ZooKeeper servers listen for requests from other servers in the ensemble.  | `2888`  |
+| `port.leader` | The port on which the ZooKeeper servers perform leader election.                                 | `3888`  |
+| `port.client` | The port to listen for client connections; that is, the port that clients attempt to connect to. | `2181`  |
+
+Since 3.5.0 we may also set the [AdminServer](https://zookeeper.apache.org/doc/r3.6.1/zookeeperAdmin.html#sc_adminserver) which by default listens in the `8080` port but can be changed by setting the `port.admin` in the [AdminServer configuration](https://zookeeper.apache.org/doc/r3.6.1/zookeeperAdmin.html#sc_adminserver_config).
+
+### Enable Kerberos
+
+This chart is prepared to enable [Kerberos authentication in Zookeeper](https://docs.confluent.io/platform/current/security/zk-security.html#sasl-with-kerberos)
+
+| Parameter               | Description                                | Default |
+| ----------------------- | ------------------------------------------ | ------- |
+| `kerberos.enabled`      | Boolean to control if Kerberos is enabled. | `false` |
+| `kerberos.krb5Conf`     | Name of the [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) that stores the `krb5.conf`, Kerberos [Configuration file](https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html) | `nil`**¹** |
+| `kerberos.keyTabSecret` | Name of the [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) that stores the [Keytab](https://web.mit.edu/kerberos/krb5-1.19/doc/basic/keytab_def.html) | `nil`**¹** |
+| `kerberos.jaasConf`     | Name of the [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) that stores the JAAS configuration files per host.  | `nil`**¹** |
+
+> **¹** When `kerberos.enabled` these parameters are required, and the [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) and [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) need to exist beforehand.
+
+### Data Persistence
+
+The ZooKeeper server continually saves `znode` snapshot files in a Data Directory to enable you to recover data, transactional logs in this deployment are store in a separated directory.
+
+| Parameter           | Description                                         | Default |
+| ------------------- | --------------------------------------------------- | ------- |
+| `data.storageClass` | Valid options: `nil`, `"-"`, or storage class name. | `nil`   |
+| `data.storageSize`  | Size for data dir.                                  | `1Gi`   |
+
+This will allow the creation of two [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) using a specific [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) and the same size for both.
+
+### Resources for Containers
+
+Regarding the management of [Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) the next defaults regarding resources and limits are set:
+
+| Parameter                   | Description                                                             | Default |
+| --------------------------- | ----------------------------------------------------------------------- | ------- |
+| `resources.limits.cpu`      | a container cannot use more CPU than the configured limit               | `200m`  |
+| `resources.limits.memory`   | a container cannot use more Memory than the configured limit            | `650Mi` |
+| `resources.requests.cpu`    | a container is guaranteed to be allocated as much CPU as it requests    | `100m`   |
+| `resources.requests.memory` | a container is guaranteed to be allocated as much Memory as it requests | `320Mi` |
+
+In terms of the JVM the next default is set:
+
+| Parameter  | Description                                | Default                                                     |
+| ---------- | ------------------------------------------ | ----------------------------------------------------------- |
+| `heapOpts` | The JVM Heap Options for Zookeeper Server. | `"-XX:MaxRAMPercentage=75.0 -XX:InitialRAMPercentage=50.0"` |
+
+### Advance Configuration
+
+Check the `values.yaml` for more advance configuration such as:
+
+- [Liveness and Readiness Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)
+- [Pod Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
+- [Container Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container)

BIN
kafka/charts/zookeeper/img/zookeeper.png


+ 12 - 0
kafka/charts/zookeeper/templates/NOTES.txt

@@ -0,0 +1,12 @@
+** Please be patient while the {{ .Chart.Name }} chart is being deployed in release {{ .Release.Name }} **
+
+This chart bootstraps an ensemble Apache Zookeeper Servers made of {{ .Values.replicaCount | quote }} servers using the Confluent stable version that can be accessed from within your cluster:
+
+    {{ include "zookeeper.fullname" . }}-headless.{{ .Release.Namespace }}:{{ .Values.port.client }}
+
+To connect to your ZooKeeper server run the following commands:
+
+    $ kubectl exec -it -n {{ .Release.Namespace }} {{ include "zookeeper.fullname" . }}-0 -- zookeeper-shell {{ include "zookeeper.fullname" . }}-headless.{{ .Release.Namespace }}:{{ .Values.port.client }}
+
+More info:
+https://ricardo-aires.github.io/helm-charts/charts/zookeeper/

+ 76 - 0
kafka/charts/zookeeper/templates/_helpers.tpl

@@ -0,0 +1,76 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "zookeeper.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "zookeeper.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "zookeeper.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "zookeeper.labels" -}}
+helm.sh/chart: {{ include "zookeeper.chart" . }}
+{{ include "zookeeper.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "zookeeper.selectorLabels" -}}
+app: {{ .Release.Name }}-{{ include "zookeeper.name" . }}
+app.kubernetes.io/name: {{ include "zookeeper.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create a server list string based on fullname, namespace, # of servers
+in a format like "zkhost1:port:port;zkhost2:port:port"
+*/}}
+{{- define "zookeeper.serverlist" -}}
+{{- $namespace := .Release.Namespace }}
+{{- $name := include "zookeeper.fullname" . -}}
+{{- $peersPort := .Values.port.peers -}}
+{{- $leaderElectionPort := .Values.port.leader -}}
+{{- $zk := dict "servers" (list) -}}
+{{- range $idx, $v := until (int .Values.replicaCount) }}
+{{- $noop := printf "%s-%d.%s-headless.%s.svc.cluster.local:%d:%d" $name $idx $name $namespace (int $peersPort) (int $leaderElectionPort) | append $zk.servers | set $zk "servers" -}}
+{{- end }}
+{{- printf "%s" (join ";" $zk.servers) | quote -}}
+{{- end -}}
+
+{{/*
+Set the minimum number of servers that must be available during evictions.
+This should be (replicaCount/2) + 1, i.e. a quorum majority.
+*/}}
+{{- define "zookeeper.minAvailable" -}}
+minAvailable: {{ (add (div .Values.replicaCount 2) 1) }}
+{{- end }}

+ 29 - 0
kafka/charts/zookeeper/templates/headless-service.yaml

@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "zookeeper.fullname" . }}-headless
+  labels:
+    {{- include "zookeeper.labels" . | nindent 4 }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+  - name: tcp-peers
+    port: {{ .Values.port.peers }}
+    protocol: TCP
+    targetPort: tcp-peers
+  - name: tcp-leader
+    port: {{ .Values.port.leader }}
+    protocol: TCP
+    targetPort: tcp-leader
+  - name: http-admin
+    port: {{ .Values.port.admin }}
+    protocol: TCP
+    targetPort: http-admin
+  - name: tcp-client
+    port: {{ .Values.port.client }}
+    protocol: TCP
+    targetPort: tcp-client
+  selector:
+    {{- include "zookeeper.selectorLabels" . | nindent 4 }}

+ 11 - 0
kafka/charts/zookeeper/templates/poddisruptionbudget.yaml

@@ -0,0 +1,11 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "zookeeper.fullname" . }}-pdb
+  labels:
+    {{- include "zookeeper.labels" . | nindent 4 }}
+spec:
+  {{- include "zookeeper.minAvailable" . | nindent 2 }}
+  selector:
+    matchLabels:
+      {{- include "zookeeper.selectorLabels" . | nindent 6 }}

+ 4 - 0
kafka/charts/zookeeper/templates/service-account.yaml

@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "zookeeper.fullname" . }}-sa

+ 186 - 0
kafka/charts/zookeeper/templates/statefulset.yaml

@@ -0,0 +1,186 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "zookeeper.fullname" . }}
+  labels:
+    {{- include "zookeeper.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "zookeeper.selectorLabels" . | nindent 6 }}
+  serviceName: {{ include "zookeeper.fullname" . }}-headless
+  replicas: {{ .Values.replicaCount }}
+  updateStrategy:
+    type: RollingUpdate
+  podManagementPolicy: Parallel
+  template:
+    metadata:
+      labels:
+        {{- include "zookeeper.selectorLabels" . | nindent 8 }}
+    spec:
+      serviceAccountName: {{ include "zookeeper.fullname" . }}-sa
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      # imagePullSecrets is a pod-spec field; it must not be nested inside the
+      # containers list (doing so renders an invalid manifest when secrets are set).
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      affinity:
+      {{- if .Values.affinity }}
+        {{ toYaml .Values.affinity | indent 8 }}
+      {{- else }}
+        # NOTE(review): pods are labelled app=<release>-<chart> by the selector
+        # labels, but this expression matches only <chart> — verify the default
+        # anti-affinity actually takes effect.
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - {{ include "zookeeper.name" . }}
+              topologyKey: kubernetes.io/hostname
+            weight: 1
+      {{- end }}
+      containers:
+      - name: {{ .Chart.Name }}
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+        env:
+        - name: ZOOKEEPER_CLIENT_PORT
+          value: {{ .Values.port.client | int | quote }}
+        - name: ZOOKEEPER_TICK_TIME
+          value: {{ .Values.tickTime | int | quote }}
+        - name: ZOOKEEPER_INIT_LIMIT
+          value: {{ .Values.initLimit | int | quote }}
+        - name: ZOOKEEPER_SYNC_LIMIT
+          value: {{ .Values.syncLimit | int | quote }}
+        - name: ZOOKEEPER_MAX_CLIENT_CNXNS
+          value: {{ .Values.maxClientCnxns | int | quote }}
+        - name: ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL
+          value: {{ .Values.autopurge.purgeInterval | int | quote }}
+        - name: ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT
+          value: {{ .Values.autopurge.snapRetainCount | int | quote }}
+        - name: ZOOKEEPER_QUORUM_LISTEN_ON_ALL_IPS
+          value: {{ .Values.quorumListenOnAllIPs | quote }}
+        - name: ZOOKEEPER_MAX_SESSION_TIMEOUT
+          value: {{ .Values.maxSessionTimeout | int | quote }}
+        - name: ZOOKEEPER_ADMIN_ENABLE_SERVER
+          value: {{ .Values.adminEnableServer | quote }}
+        - name: ZOOKEEPER_LOG4J_ROOT_LOGLEVEL
+          value: {{ .Values.log4jRootLogLevel | quote }}
+        - name: KAFKA_HEAP_OPTS
+          value: {{ .Values.heapOpts | quote }}
+        - name : ZOOKEEPER_SERVERS
+          value: {{ include "zookeeper.serverlist" . }}
+        {{- if .Values.kerberos.enabled }}
+        - name: ZOOKEEPER_AUTH_PROVIDER_SASL
+          value: "org.apache.zookeeper.server.auth.SASLAuthenticationProvider"
+        - name: ZOOKEEPER_KERBEROS_REMOVE_HOST_FROM_PRINCIPAL
+          value: "true"
+        - name: ZOOKEEPER_KERBEROS_REMOVE_REALM_FROM_PRINCIPAL
+          value: "true"
+        {{- end }}
+        # Placeholder only: the numeric server id actually used is derived from
+        # the pod ordinal (HOSTNAME suffix) in the command below.
+        - name: ZOOKEEPER_SERVER_ID
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        command:
+        - "bash"
+        - "-c"
+        - |
+          ZK_FIX_HOST_REGEX="s/${HOSTNAME}\.[^:]*:/0.0.0.0:/g"
+          ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-}+1)) \
+          ZOOKEEPER_SERVERS=`echo $ZOOKEEPER_SERVERS | sed -e "$ZK_FIX_HOST_REGEX"` \
+          {{- if .Values.kerberos.enabled }}
+          KAFKA_OPTS="-Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/jaas/${HOSTNAME}_jaas.conf -Dsun.security.krb5.debug=false \
+                      -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true" \
+          {{- end }}
+          /etc/confluent/docker/run
+        ports:
+        - name: tcp-client
+          containerPort: {{ .Values.port.client }}
+        - name: tcp-peers
+          containerPort: {{ .Values.port.peers }}
+        - name: tcp-leader
+          containerPort: {{ .Values.port.leader }}
+        - name: http-admin
+          containerPort: {{ .Values.port.admin }}
+        {{- if .Values.livenessProbe.enabled }}
+        livenessProbe:
+          httpGet:
+            path: /commands/ruok
+            port: http-admin
+          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.livenessProbe.successThreshold }}
+          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+        {{- end }}
+        {{- if .Values.readinessProbe.enabled }}
+        readinessProbe:
+          httpGet:
+            path: /commands/ruok
+            port: http-admin
+          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.readinessProbe.successThreshold }}
+          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+        {{- end }}
+        volumeMounts:
+        - name: data
+          mountPath: /var/lib/zookeeper/data
+        - name: log
+          mountPath: /var/lib/zookeeper/log
+        - name: config
+          mountPath: /etc/kafka
+        - name: logs
+          mountPath: /var/log
+        {{- if .Values.kerberos.enabled }}
+        - name: krb5
+          mountPath: /etc/krb5.conf
+          subPath: krb5.conf
+        - name: keytabs
+          mountPath: /keytabs
+          readOnly: true
+        - name: jaas
+          mountPath: /jaas
+          readOnly: true
+        {{- end }}
+        securityContext:
+          {{- toYaml .Values.securityContext | nindent 12 }}
+        resources:
+          {{- toYaml .Values.resources | nindent 12 }}
+      volumes:
+      - name: config
+        emptyDir: {}
+      - name: logs
+        emptyDir: {}
+      {{- if .Values.kerberos.enabled }}
+      - name: krb5
+        configMap:
+          name: {{ required "The .Values.kerberos.krb5Conf is required when kerberos enabled!" .Values.kerberos.krb5Conf }}
+      - name: keytabs
+        secret:
+          secretName: {{ required "The .Values.kerberos.keyTabSecret is required when kerberos enabled!" .Values.kerberos.keyTabSecret }}
+      - name: jaas
+        configMap:
+          name: {{ required "The .Values.kerberos.jaasConf is required when kerberos enabled!" .Values.kerberos.jaasConf }}
+      {{- end }}
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      storageClassName: {{ .Values.data.storageClass | quote }}
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+          storage: {{ .Values.data.storageSize | quote }}
+  - metadata:
+      name: log
+    spec:
+      storageClassName: {{ .Values.data.storageClass | quote }}
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+          storage: {{ .Values.data.storageSize | quote }}
+ 34 - 0
kafka/charts/zookeeper/templates/tests/test-connection.yaml

@@ -0,0 +1,34 @@
+{{- $root := . }}
+{{- $fullName := include "zookeeper.fullname" . }}
+{{- $port := .Values.port.admin | int }}
+{{- $replicaCount := .Values.replicaCount | int }}
+
+{{- range $i, $e := until $replicaCount }}
+{{/* Pod name of the i-th ensemble member; the inner printf wrapper and the
+     unused `set $ "targetPod"` from the original were dead code. */}}
+{{- $targetPod := printf "%s-%d" $fullName $i }}
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ $targetPod}}-test"
+  labels:
+    {{- include "zookeeper.labels" $root | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+spec:
+  serviceAccountName: {{ $fullName }}-sa
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['-qO-', '{{ $targetPod}}.{{ $fullName }}-headless:{{ $port }}/commands/srvr']
+      resources:
+        requests:
+          memory: "200Mi"
+          cpu: "10m"
+        limits:
+          memory: "200Mi"
+          cpu: "10m"
+  restartPolicy: Never
+{{- end }}

+ 100 - 0
kafka/charts/zookeeper/values.yaml

@@ -0,0 +1,100 @@
+# Default values for confluent.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Image Info
+## ref: https://hub.docker.com/r/confluentinc/cp-zookeeper
+image:
+  registry: docker.io
+  repository: confluentinc/cp-zookeeper
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
+  pullPolicy: IfNotPresent
+imagePullSecrets:  []
+nameOverride: ""
+fullnameOverride: ""
+
+## ZooKeeper Ensemble
+## ref: https://zookeeper.apache.org/doc/r3.6.2/zookeeperAdmin.html#sc_zkMulitServerSetup
+replicaCount: 3
+
+## Zookeeper Configuration
+## ref: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_configuration
+tickTime: 2000
+initLimit: 10
+syncLimit: 5
+maxClientCnxns: 60
+autopurge:
+  purgeInterval: 24
+  snapRetainCount: 3
+quorumListenOnAllIPs: true
+maxSessionTimeout: 40000
+adminEnableServer: true
+heapOpts: "-XX:MaxRAMPercentage=75.0 -XX:InitialRAMPercentage=50.0"
+log4jRootLogLevel: INFO
+
+## Ports used by Zookeeper
+## ref: https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_configuration
+port:
+  peers: 2888
+  leader: 3888
+  admin: 8080
+  client: 2181
+
+## Zookeeper Kerberos
+## ref: https://docs.confluent.io/platform/current/security/zk-security.html#sasl-with-kerberos
+kerberos:
+  enabled: false
+  krb5Conf:
+  keyTabSecret:
+  jaasConf:
+
+## Data Persistence
+## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
+data:
+  storageClass:
+  storageSize: 1Gi
+
+## Configure Liveness and Readiness Probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Configure Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+podSecurityContext:
+  fsGroup: 1000
+
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+securityContext:
+  allowPrivilegeEscalation: false
+  readOnlyRootFilesystem: true
+  runAsUser: 1000
+  runAsGroup: 1000
+  capabilities:
+    drop:
+    - ALL
+
+## Configure Resources for Containers
+## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+resources:
+  limits:
+    cpu: 200m
+    memory: 650Mi
+  requests:
+    cpu: 100m
+    memory: 320Mi

BIN
kafka/img/kafka.png


+ 8 - 0
kafka/templates/NOTES.txt

@@ -0,0 +1,8 @@
+** Please be patient while the {{ .Chart.Name }} chart is being deployed in release {{ .Release.Name }} **
+
+This chart bootstraps a Kafka Cluster made of {{ .Values.replicaCount | quote }} brokers using the Confluent stable version that can be accessed from within your cluster:
+
+    {{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}:{{ .Values.port.kafkaInternal }}
+
+More info:
+https://ricardo-aires.github.io/helm-charts/charts/kafka/

+ 92 - 0
kafka/templates/_helpers.tpl

@@ -0,0 +1,92 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kafka.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kafka.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kafka.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "kafka.labels" -}}
+helm.sh/chart: {{ include "kafka.chart" . }}
+{{ include "kafka.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "kafka.selectorLabels" -}}
+app: {{ .Release.Name }}-{{ include "kafka.name" . }}
+app.kubernetes.io/name: {{ include "kafka.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create a default kafka fully qualified domain name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "kafka.listener" -}}
+{{- $namespace := .Release.Namespace }}
+{{- printf "${POD_NAME}.%s-headless.%s.svc.cluster.local" (include "kafka.fullname" .) $namespace | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default kafka bootstrap server name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "kafka.bootstrap.server" -}}
+{{- $namespace := .Release.Namespace }}
+{{- printf "%s-0.%s-headless.%s.svc.cluster.local" (include "kafka.fullname" .) (include "kafka.fullname" .) $namespace | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified zookeeper name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "kafka.zookeeper.fullname" -}}
+{{- $name := default "zookeeper" (index .Values "zookeeper" "nameOverride") -}}
+{{- printf "%s-%s-headless" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Form the Zookeeper URL. If zookeeper is installed as part of this chart, use k8s service discovery,
+else use user-provided URL
+*/}}
+{{- define "kafka.zookeeper.ensemble" }}
+{{- if (index .Values "zookeeper" "enabled") -}}
+{{- $clientPort := default 2181 (index .Values "zookeeper" "port" "client") | int -}}
+{{- printf "%s:%d" (include "kafka.zookeeper.fullname" .) $clientPort }}
+{{- else -}}
+{{- printf "%s" (index .Values "zookeeper" "url") }}
+{{- end -}}
+{{- end -}}

+ 21 - 0
kafka/templates/headless-service.yaml

@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "kafka.fullname" . }}-headless
+  labels:
+    {{- include "kafka.labels" . | nindent 4 }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+  - name: tcp-kafka-int
+    port: {{ .Values.port.kafkaInternal }}
+    protocol: TCP
+    targetPort: tcp-kafka-int
+  - name: tcp-kafka-ext
+    port: {{ .Values.port.kafkaExternal }}
+    protocol: TCP
+    targetPort: tcp-kafka-ext
+  selector:
+    {{- include "kafka.selectorLabels" . | nindent 4 }}

+ 30 - 0
kafka/templates/nodeport-service.yaml

@@ -0,0 +1,30 @@
+{{- if .Values.externalAccess.enabled }}
+{{- $root := . }}
+{{- $fullName := include "kafka.fullname" . }}
+{{- $initNodePort := .Values.externalAccess.initNodePort | int }}
+{{- $replicaCount := .Values.replicaCount | int }}
+
+{{- range $i, $e := until $replicaCount }}
+{{/* Pod name of the i-th broker; the redundant printf wrapper and the
+     unused `set $ "targetPod"` from the original were dead code. */}}
+{{- $targetPod := printf "%s-%d" $fullName $i }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "kafka.fullname" $root }}-{{ $i }}-external
+  labels:
+    {{- include "kafka.labels" $root | nindent 4 }}
+    statefulset.kubernetes.io/pod-name: {{ $targetPod }}
+spec:
+  type: NodePort
+  ports:
+  - name: tcp-kafka-ext
+    protocol: TCP
+    targetPort: tcp-kafka-ext
+    # Use the configured external listener port instead of hard-coding 9094,
+    # so this stays consistent with the headless service and the statefulset.
+    port: {{ $root.Values.port.kafkaExternal }}
+    nodePort: {{ add $initNodePort $i }}
+  selector:
+    {{- include "kafka.selectorLabels" $root | nindent 4 }}
+    statefulset.kubernetes.io/pod-name: {{ $targetPod }}
+{{- end }}
+{{- end }}

+ 11 - 0
kafka/templates/poddisruptionbudget.yaml

@@ -0,0 +1,11 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "kafka.fullname" . }}-pdb
+  labels:
+    {{- include "kafka.labels" . | nindent 4 }}
+spec:
+  maxUnavailable: {{ .Values.maxUnavailable }}
+  selector:
+    matchLabels:
+      {{- include "kafka.selectorLabels" . | nindent 6 }}

+ 4 - 0
kafka/templates/service-account.yaml

@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "kafka.fullname" . }}-sa

+ 221 - 0
kafka/templates/statefulset.yaml

@@ -0,0 +1,221 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "kafka.fullname" . }}
+  labels:
+    {{- include "kafka.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "kafka.selectorLabels" . | nindent 6 }}
+  serviceName: {{ include "kafka.fullname" . }}-headless
+  replicas: {{ .Values.replicaCount }}
+  updateStrategy:
+    type: RollingUpdate
+  podManagementPolicy: Parallel
+  template:
+    metadata:
+      labels:
+        {{- include "kafka.selectorLabels" . | nindent 8 }}
+    spec:
+      serviceAccountName: {{ include "kafka.fullname" . }}-sa
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      {{- with .Values.imagePullSecrets }}
+      # imagePullSecrets is a PodSpec field — it was previously (incorrectly)
+      # nested inside the container entry, where Kubernetes does not read it.
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      affinity:
+      {{- if .Values.affinity }}
+        {{- toYaml .Values.affinity | nindent 8 }}
+      {{- else }}
+        # Spread brokers across nodes by default. The match value must equal
+        # the `app` label set by "kafka.selectorLabels"
+        # ({{ .Release.Name }}-{{ include "kafka.name" . }}); the bare chart
+        # name alone never matched any pod.
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                  - {{ .Release.Name }}-{{ include "kafka.name" . }}
+              topologyKey: kubernetes.io/hostname
+            weight: 1
+      {{- end }}
+      containers:
+      - name: {{ .Chart.Name }}
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+        env:
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: status.podIP
+        - name: HOST_IP
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: status.hostIP
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: KAFKA_ZOOKEEPER_CONNECT
+          value: {{ include "kafka.zookeeper.ensemble" . | quote }}
+        - name: KAFKA_HEAP_OPTS
+          value: {{ .Values.heapOpts | quote }}
+        - name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
+          value: {{ .Values.confluentSupportMetricsEnable | quote }}
+        - name: KAFKA_LOG_DIRS
+          value: "/var/lib/kafka/data"
+        - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE
+          value: {{ .Values.autoCreateTopicsEnable | quote }}
+        - name: KAFKA_DELETE_TOPIC_ENABLE
+          value: {{ .Values.deleteTopicEnable | quote }}
+        - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
+          value: {{ .Values.offsetsTopicReplicationFactor | int | quote }}
+        - name: KAFKA_NUM_PARTITIONS
+          value: {{ .Values.numPartitions | int | quote }}
+        - name: KAFKA_DEFAULT_REPLICATION_FACTOR
+          value: {{ .Values.defaultReplicationFactor | int | quote }}
+        - name: KAFKA_MIN_INSYNC_REPLICAS
+          value: {{ .Values.minInsyncReplicas | int | quote }}
+        # Maps to broker property unclean.leader.election.enable. Dots are not
+        # valid in environment variable names; the Confluent image converts
+        # underscores in KAFKA_* vars to dots, so use underscores throughout.
+        - name: KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE
+          value: {{ .Values.uncleanLeaderElectionEnable | quote }}
+        - name: KAFKA_LOG_FLUSH_INTERVAL_MESSAGES
+          value: {{ .Values.logFlushIntervalMessages | int | quote }}
+        - name: KAFKA_LOG_FLUSH_INTERVAL_MS
+          value: {{ .Values.logFlushIntervalMs | int | quote }}
+        - name: KAFKA_LOG_RETENTION_BYTES
+          value: {{ .Values.logRetentionBytes | int | quote }}
+        - name: KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS
+          value: {{ .Values.logRetentionCheckIntervalMs | int | quote }}
+        - name: KAFKA_LOG_RETENTION_HOURS
+          value: {{ .Values.logRetentionHours | int | quote }}
+        - name: KAFKA_LOG_SEGMENT_BYTES
+          value: {{ .Values.logSegmentBytes | int | quote }}
+        - name: KAFKA_MESSAGE_MAX_BYTES
+          value: {{ .Values.messageMaxBytes | int | quote }}
+        - name: KAFKA_LOG4J_ROOT_LOGLEVEL
+          value: {{ .Values.log4jRootLoglevel | quote }}
+        - name: KAFKA_LOG4J_LOGGERS
+          value: {{ .Values.log4jLoggers | quote }}
+        {{- if .Values.kerberos.enabled }}
+        - name: KAFKA_SECURITY_INTER_BROKER_PROTOCOL
+          value: SASL_PLAINTEXT
+        - name: KAFKA_SASL_KERBEROS_SERVICE_NAME
+          value: "kafka"
+        - name: KAFKA_LISTENERS
+          value: "SASL_PLAINTEXT://:{{ .Values.port.kafkaInternal }},EXTERNAL://:{{ .Values.port.kafkaExternal }}"
+        - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
+          value: "SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT"
+        {{- else }}
+        - name: KAFKA_LISTENERS
+          value: "PLAINTEXT://:{{ .Values.port.kafkaInternal }},EXTERNAL://:{{ .Values.port.kafkaExternal }}"
+        - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
+          value: "PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+        {{- end }}
+        - name: ZOOKEEPER_SASL_ENABLED
+          value: {{ .Values.zookeeper.kerberos.enabled | quote }}
+        {{- if .Values.acls.enabled }}
+        - name: KAFKA_ZOOKEEPER_SET_ACL
+          value: "true"
+        - name: KAFKA_SUPER_USERS
+          value: "User:kafka"
+        - name: KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND
+          value: "false"
+        - name: KAFKA_AUTHORIZER_CLASS_NAME
+          value: "kafka.security.authorizer.AclAuthorizer"
+        {{- end }}
+        command:
+        - "sh"
+        - "-exc"
+        - |
+          export KAFKA_BROKER_ID=${HOSTNAME##*-} && \
+          {{- if .Values.isDocker }}
+          export KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://{{ include "kafka.listener" . }}:{{ .Values.port.kafkaInternal }},EXTERNAL://127.0.0.1:$(({{ .Values.externalAccess.initNodePort }} + ${KAFKA_BROKER_ID})) && \
+          {{- else if .Values.kerberos.enabled }}
+          export KAFKA_ADVERTISED_LISTENERS=SASL_PLAINTEXT://{{ include "kafka.listener" . }}:{{ .Values.port.kafkaInternal }},EXTERNAL://{{ include "kafka.listener" . }}:$(({{ .Values.externalAccess.initNodePort }} + ${KAFKA_BROKER_ID})) && \
+          export KAFKA_OPTS="-Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/jaas/${POD_NAME}_jaas.conf -Dsun.security.krb5.debug=false" && \
+          {{- else }}
+          export KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://{{ include "kafka.listener" . }}:{{ .Values.port.kafkaInternal }},EXTERNAL://${HOST_IP}:$(({{ .Values.externalAccess.initNodePort }} + ${KAFKA_BROKER_ID})) && \
+          {{- end }}
+          exec /etc/confluent/docker/run
+        ports:
+        - name: tcp-kafka-int
+          containerPort: {{ .Values.port.kafkaInternal }}
+        - name: tcp-kafka-ext
+          containerPort: {{ .Values.port.kafkaExternal }}
+        {{- if .Values.livenessProbe.enabled }}
+        livenessProbe:
+          tcpSocket:
+            port: tcp-kafka-int
+          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.livenessProbe.successThreshold }}
+          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+        {{- end }}
+        {{- if .Values.readinessProbe.enabled }}
+        readinessProbe:
+          tcpSocket:
+            port: tcp-kafka-int
+          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.readinessProbe.successThreshold }}
+          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+        {{- end }}
+        volumeMounts:
+        - name: data
+          mountPath: /var/lib/kafka/data
+        - name: config
+          mountPath: /etc/kafka
+        - name: logs
+          mountPath: /var/log
+        {{- if or .Values.kerberos.enabled .Values.zookeeper.kerberos.enabled }}
+        - name: krb5
+          mountPath: /etc/krb5.conf
+          subPath: krb5.conf
+        - name: keytabs
+          mountPath: /keytabs
+          readOnly: true
+        - name: jaas
+          mountPath: /jaas
+          readOnly: true
+        {{- end }}
+        securityContext:
+          {{- toYaml .Values.securityContext | nindent 12 }}
+        resources:
+          {{- toYaml .Values.resources | nindent 12 }}
+      volumes:
+      - name: config
+        emptyDir: {}
+      - name: logs
+        emptyDir: {}
+      {{- if .Values.kerberos.enabled }}
+      - name: krb5
+        configMap:
+          name: {{ required "The .Values.kerberos.krb5Conf is required when kerberos enabled!" .Values.kerberos.krb5Conf }}
+      - name: keytabs
+        secret:
+          secretName: {{ required "The .Values.kerberos.keyTabSecret is required when kerberos enabled!" .Values.kerberos.keyTabSecret }}
+      - name: jaas
+        configMap:
+          name: {{ required "The .Values.kerberos.jaasConf is required when kerberos enabled!" .Values.kerberos.jaasConf }}
+      {{- end }}
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      storageClassName: {{ .Values.data.storageClass | quote }}
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+          storage: {{ .Values.data.storageSize | quote }}

+ 91 - 0
kafka/templates/tests/test-connection.yaml

@@ -0,0 +1,91 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "kafka.fullname" . }}-test"
+  annotations:
+    "helm.sh/hook": test
+    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+spec:
+  serviceAccountName: {{ include "kafka.fullname" . }}-sa
+  restartPolicy: Never
+  containers:
+  - name: {{ include "kafka.fullname" . }}-test
+    image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+    imagePullPolicy: "{{ .Values.image.pullPolicy }}"
+    {{- if not .Values.kerberos.enabled }}
+    command:
+    - sh
+    - -c
+    - |
+      set -ex
+      # Delete the topic if it exists
+      kafka-topics --bootstrap-server ${BOOTSTRAP_SERVER} --topic ${TOPIC_NAME} --delete --if-exists
+      # Create the topic
+      kafka-topics --bootstrap-server ${BOOTSTRAP_SERVER} --create --topic ${TOPIC_NAME} --partitions {{ .Values.replicaCount }} --replication-factor 1 --if-not-exists || exit 1
+      # Create a message
+      MESSAGE="`date -u`" || exit 1
+      # Produce a test message to the topic
+      (echo "$MESSAGE" | kafka-console-producer --broker-list ${BOOTSTRAP_SERVER} --topic ${TOPIC_NAME}) || exit 1
+      # Consume a test message from the topic
+      kafka-console-consumer --bootstrap-server ${BOOTSTRAP_SERVER} --topic ${TOPIC_NAME} --group ${GROUP_NAME} --from-beginning --timeout-ms 10000 --max-messages 1 | grep "$MESSAGE"
+    {{- end }}
+    {{- if .Values.kerberos.enabled }}
+    command:
+    - sh
+    - -c
+    - |
+      set -ex
+      # Delete the topic if it exists
+      kafka-topics --bootstrap-server ${BOOTSTRAP_SERVER} --topic ${TOPIC_NAME} --delete --if-exists --command-config /etc/kafka/kafka-client.properties
+      # Create the topic
+      kafka-topics --bootstrap-server ${BOOTSTRAP_SERVER} --create --topic ${TOPIC_NAME} --partitions {{ .Values.replicaCount }} --replication-factor 1 --if-not-exists --command-config /etc/kafka/kafka-client.properties || exit 1
+      # Create a message
+      MESSAGE="`date -u`" || exit 1
+      # Produce a test message to the topic
+      (echo "$MESSAGE" | kafka-console-producer --broker-list ${BOOTSTRAP_SERVER} --topic ${TOPIC_NAME} --producer.config /etc/kafka/kafka-client.properties) || exit 1
+      # Consume a test message from the topic
+      kafka-console-consumer --bootstrap-server ${BOOTSTRAP_SERVER} --topic ${TOPIC_NAME} --group ${GROUP_NAME} --from-beginning --timeout-ms 10000 --max-messages 1 --consumer.config /etc/kafka/kafka-client.properties | grep "$MESSAGE"
+    {{- end }}
+    env:
+    - name: TOPIC_NAME
+      value: {{ include "kafka.fullname" . }}-test-topic
+    - name: GROUP_NAME
+      value: {{ include "kafka.fullname" . }}-test
+    - name: BOOTSTRAP_SERVER
+      value: {{ include "kafka.bootstrap.server" . }}:{{ .Values.port.kafkaInternal }}
+    resources:
+      requests:
+        memory: "1024Mi"
+        cpu: "500m"
+      limits:
+        memory: "1024Mi"
+        cpu: "500m"
+    {{- if .Values.kerberos.enabled }}
+    volumeMounts:
+    - name: krb5
+      mountPath: /etc/krb5.conf
+      subPath: krb5.conf
+    - name: client-conf
+      mountPath: /etc/kafka/kafka-client.properties
+      subPath: kafka-client.properties
+    - name: generic-user-keytab
+      mountPath: "/keytabs"
+      readOnly: true
+    # NOTE: the container's `resources:` block is declared once above for both
+    # branches; a second copy here was a duplicate mapping key and is removed.
+  volumes:
+  - name: krb5
+    configMap:
+      name: {{ required "The .Values.kerberos.krb5Conf is required when kerberos enabled!" .Values.kerberos.krb5Conf }}
+  - name: client-conf
+    configMap:
+      name: {{ required "The .Values.kerberos.kafkaCltProperties is required when kerberos enabled!" .Values.kerberos.kafkaCltProperties }}
+  - name: generic-user-keytab
+    secret:
+      secretName: {{ required "The .Values.kerberos.testUserKeytabSecret is required when kerberos enabled!" .Values.kerberos.testUserKeytabSecret }}
+  {{- end }}

+ 126 - 0
kafka/values.yaml

@@ -0,0 +1,126 @@
+# Default values for kafka.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Image Info
+## ref: https://hub.docker.com/r/confluentinc/cp-kafka
+image:
+  registry: docker.io
+  repository: confluentinc/cp-kafka
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
+  pullPolicy: IfNotPresent
+imagePullSecrets:  []
+nameOverride: ""
+fullnameOverride: ""
+
+## Kafka Cluster
+replicaCount: 3
+maxUnavailable: 1
+
+## Kafka Configuration
+## ref: https://docs.confluent.io/platform/current/installation/configuration/broker-configs.html
+heapOpts: -XX:MaxRAMPercentage=75.0 -XX:InitialRAMPercentage=50.0
+confluentSupportMetricsEnable: false
+autoCreateTopicsEnable: true
+deleteTopicEnable: true
+offsetsTopicReplicationFactor: 3
+numPartitions: 3
+defaultReplicationFactor: 3
+minInsyncReplicas: 2
+uncleanLeaderElectionEnable: false
+logFlushIntervalMessages: 10000
+logFlushIntervalMs: 1000
+logRetentionBytes: 1073741824
+logRetentionCheckIntervalMs: 300000
+logRetentionHours: 168
+logSegmentBytes: 1073741824
+# messageMaxBytes: 10485880000
+messageMaxBytes: 10240000
+
+## Kafka Docker Logging - log4j log levels
+## ref: https://docs.confluent.io/platform/current/installation/docker/operations/logging.html#configure-docker-logging
+log4jRootLoglevel: INFO
+log4jLoggers: "kafka.authorizer.logger=INFO,kafka.controller=INFO"
+
+## Ports used by Kafka
+## ref: https://rmoff.net/2018/08/02/kafka-listeners-explained/
+port:
+  kafkaInternal: 9092
+  kafkaExternal: 9094
+externalAccess:
+  enabled: false
+  initNodePort: 32400
+## turn to support nodePort in docker desktop
+isDocker: true
+
+## Kafka Broker Kerberos
+## ref: https://docs.confluent.io/platform/current/kafka/authentication_sasl/authentication_sasl_gssapi.html#brokers
+kerberos:
+  enabled: false
+  krb5Conf:
+  keyTabSecret:
+  testUserKeytabSecret:
+  kafkaCltProperties:
+
+## Authorization using ACLs
+## ref: https://docs.confluent.io/platform/current/kafka/authorization.html
+acls:
+  enabled: false
+
+## Zookeeper Configuration
+## ref: https://docs.confluent.io/platform/current/zookeeper/deployment.html
+zookeeper:
+  ## If true, install the zookeeper chart
+  enabled: true
+  ## If the Zookeeper Chart is disabled a URL and port are required to connect
+  url: ""
+
+## Data Persistence
+## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
+data:
+  storageClass: openebs-hostpath
+  storageSize: 10Gi
+
+## Configure Liveness and Readiness Probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 300
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 120
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Configure Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+podSecurityContext:
+  fsGroup: 1000
+
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+securityContext:
+  allowPrivilegeEscalation: false
+  readOnlyRootFilesystem: true
+  runAsUser: 1000
+  runAsGroup: 1000
+  capabilities:
+    drop:
+    - ALL
+
+## Configure Resources for Containers
+## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+resources:
+  limits:
+    cpu: 1
+    memory: 1400Mi
+  requests:
+    cpu: 250m
+    memory: 512Mi

+ 0 - 0
kafka/README.MD → kafka_bitnami/README.MD


+ 0 - 0
kafka/docker/Dockerfile → kafka_bitnami/docker/Dockerfile


+ 0 - 0
kafka/docker/entrypoint.sh → kafka_bitnami/docker/entrypoint.sh


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/.helmignore → kafka_bitnami/helm/kafka/.helmignore


+ 0 - 0
kafka/helm/kafka/Chart.lock → kafka_bitnami/helm/kafka/Chart.lock


+ 0 - 0
kafka/helm/kafka/Chart.yaml → kafka_bitnami/helm/kafka/Chart.yaml


+ 0 - 0
kafka/helm/kafka/README.md → kafka_bitnami/helm/kafka/README.md


+ 0 - 0
kafka/helm/kafka/charts/common/.helmignore → kafka_bitnami/helm/kafka/charts/common/.helmignore


+ 0 - 0
kafka/helm/kafka/charts/common/Chart.yaml → kafka_bitnami/helm/kafka/charts/common/Chart.yaml


+ 0 - 0
kafka/helm/kafka/charts/common/README.md → kafka_bitnami/helm/kafka/charts/common/README.md


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_affinities.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_affinities.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_capabilities.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_capabilities.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_errors.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_errors.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_images.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_images.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_ingress.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_ingress.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_labels.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_labels.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_names.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_names.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_secrets.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_secrets.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_storage.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_storage.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_tplvalues.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_tplvalues.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_utils.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_utils.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/_warnings.tpl → kafka_bitnami/helm/kafka/charts/common/templates/_warnings.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/validations/_cassandra.tpl → kafka_bitnami/helm/kafka/charts/common/templates/validations/_cassandra.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/validations/_mariadb.tpl → kafka_bitnami/helm/kafka/charts/common/templates/validations/_mariadb.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/validations/_mongodb.tpl → kafka_bitnami/helm/kafka/charts/common/templates/validations/_mongodb.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/validations/_mysql.tpl → kafka_bitnami/helm/kafka/charts/common/templates/validations/_mysql.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/validations/_postgresql.tpl → kafka_bitnami/helm/kafka/charts/common/templates/validations/_postgresql.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/validations/_redis.tpl → kafka_bitnami/helm/kafka/charts/common/templates/validations/_redis.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/templates/validations/_validations.tpl → kafka_bitnami/helm/kafka/charts/common/templates/validations/_validations.tpl


+ 0 - 0
kafka/helm/kafka/charts/common/values.yaml → kafka_bitnami/helm/kafka/charts/common/values.yaml


+ 21 - 0
kafka_bitnami/helm/kafka/charts/zookeeper/.helmignore

@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj

+ 0 - 0
kafka/helm/kafka/charts/zookeeper/Chart.lock → kafka_bitnami/helm/kafka/charts/zookeeper/Chart.lock


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/Chart.yaml → kafka_bitnami/helm/kafka/charts/zookeeper/Chart.yaml


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/README.md → kafka_bitnami/helm/kafka/charts/zookeeper/README.md


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/.helmignore → kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/.helmignore


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/Chart.yaml → kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/Chart.yaml


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/README.md → kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/README.md


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl → kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl


+ 0 - 0
kafka/helm/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl → kafka_bitnami/helm/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl


Some files were not shown because too many files changed in this diff