values.yaml

---
clusterName: "elasticsearch"
nodeGroup: "master"

# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""

# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.roles=master
# https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles
roles:
  - master
  - data
  - data_content
  - data_hot
  - data_warm
  - data_cold
  - ingest
  - ml
  - remote_cluster_client
  - transform

replicas: 2
minimumMasterNodes: 2

esMajorVersion: ""

# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig: {}
#  elasticsearch.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value

createCert: true

esJvmOptions: {}
#  processors.options: |
#    -XX:ActiveProcessorCount=3

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# Disable it to use your own elastic-credential Secret.
secret:
  enabled: true
  password: "12356789" # generated randomly if not defined

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
#    defaultMode: 0755

hostAliases: []
#- ip: "127.0.0.1"
#  hostnames:
#  - "foo.local"
#  - "bar.local"

image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "8.5.1"
imagePullPolicy: "IfNotPresent"

podAnnotations: {}
# iam.amazonaws.com/role: es-cluster

# additional labels
labels: {}

esJavaOpts: "" # example: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "2000m"
    memory: "4Gi"

initResources: {}
#  limits:
#    cpu: "25m"
#    # memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"

networkHost: "0.0.0.0"

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 30Gi
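  # Illustrative addition (not an upstream default): the volumeClaimTemplate is rendered
  # into the StatefulSet PVC spec, so a storageClassName can be set here if the cluster's
  # default StorageClass should not be used; "standard" is a placeholder class name.
  # storageClassName: "standard"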

rbac:
  create: false
  serviceAccountAnnotations: {}
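  # Hypothetical example (not part of the chart defaults): annotations set here end up on
  # the generated ServiceAccount, e.g. for an IAM role binding; the ARN below is a placeholder.
  #   eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/es-snapshots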
  serviceAccountName: ""
  automountToken: true

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir

persistence:
  enabled: true
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  annotations: {}

extraVolumes: []
# - name: extras
#   emptyDir: {}

extraVolumeMounts: []
# - name: extras
#   mountPath: /usr/share/extras
#   readOnly: true

extraContainers: []
# - name: do-something
#   image: busybox
#   command: ['do', 'something']

extraInitContainers: []
# - name: do-something
#   image: busybox
#   command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
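# For example, to spread pods across zones rather than nodes, the standard zone label
# could be used instead (illustrative, not the chart default):
# antiAffinityTopologyKey: "topology.kubernetes.io/zone"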

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
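# Illustrative sketch using standard Kubernetes nodeAffinity syntax (not an upstream
# default); the node label key and value below are placeholders.
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#       - matchExpressions:
#           - key: node-role.example.com/elasticsearch
#             operator: In
#             values:
#               - "true"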

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true

protocol: https
httpPort: 9200
transportPort: 9300

service:
  enabled: true
  labels: {}
  labelsHeadless: {}
  type: ClusterIP
  # Consider that all endpoints are considered "ready" even if the Pods themselves are not
  # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  publishNotReadyAddresses: false
  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120

sysctlVmMaxMapCount: 262144

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

imagePullSecrets: []
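# Hypothetical example (not an upstream default); the name below is a placeholder for an
# existing docker-registry pull secret in the release namespace.
# imagePullSecrets:
#   - name: my-registry-credentials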
nodeSelector: {}
tolerations: []
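# Illustrative sketches using standard Kubernetes scheduling syntax (not upstream
# defaults); the label, key, and value names below are placeholders.
# nodeSelector:
#   disktype: ssd
# tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "elasticsearch"
#     effect: "NoSchedule"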

# Enabling this will publicly expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  className: "nginx"
  pathtype: ImplementationSpecific
  hosts:
    - host: es.cecf.base
      paths:
        - path: /
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

nameOverride: ""
fullnameOverride: ""
healthNameOverride: ""

lifecycle: {}
# preStop:
#   exec:
#     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
#   exec:
#     command:
#       - bash
#       - -c
#       - |
#         #!/bin/bash
#         # Add a template to adjust number of shards/replicas
#         TEMPLATE_NAME=my_template
#         INDEX_PATTERN="logstash-*"
#         SHARD_COUNT=8
#         REPLICA_COUNT=1
#         ES_URL=http://localhost:9200
#         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
#         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'

sysctlInitContainer:
  enabled: true

keystore: []
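# Illustrative sketch of keystore entries, assuming the chart's convention of listing
# Kubernetes Secrets whose keys are added to the Elasticsearch keystore; the secret
# names and key/path mapping below are placeholders and may vary by chart version.
# keystore:
#   - secretName: elasticsearch-s3-credentials
#   - secretName: gcs-credentials
#     items:
#       - key: gcs_credentials
#         path: gcs.client.default.credentials_file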

networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## elasticsearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## elasticsearch-master-transport-client: "true"
  http:
    enabled: false
    ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace
    ## and matching all criteria can reach Elasticsearch.
    ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this
    ## parameter to select these namespaces
    ##
    # explicitNamespacesSelector:
    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
    ##
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend
  transport:
    ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled.
    enabled: false
    # explicitNamespacesSelector:
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

tests:
  enabled: true