global: image: # -- Overrides the Docker registry globally for all images, excluding enterprise. registry: docker.io # -- Optional list of imagePullSecrets for all images, excluding enterprise. # Names of existing secrets with private container registry credentials. # Ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod # Example: # pullSecrets: [ my-dockerconfigjson-secret ] pullSecrets: [] # -- Overrides the priorityClassName for all pods priorityClassName: null # -- configures cluster domain ("cluster.local" by default) clusterDomain: 'cluster.local' # -- configures DNS service name dnsService: 'kube-dns' # -- configures DNS service namespace dnsNamespace: 'kube-system' fullnameOverride: '' # fullnameOverride: tempo # -- Configuration is loaded from the secret called 'externalConfigSecretName'. # If 'useExternalConfig' is true, then the configuration is not generated, just # consumed. Top level keys for `tempo.yaml` and `overrides.yaml` are to be # provided by the user. useExternalConfig: false # -- Defines what kind of object stores the configuration, a ConfigMap or a Secret. # In order to move sensitive information (such as credentials) from the ConfigMap/Secret to a more secure location (e.g. vault), it is possible to use [environment variables in the configuration](https://grafana.com/docs/mimir/latest/operators-guide/configuring/reference-configuration-parameters/#use-environment-variables-in-the-configuration). # Such environment variables can be then stored in a separate Secret and injected via the global.extraEnvFrom value. For details about environment injection from a Secret please see [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-as-container-environment-variables). configStorageType: ConfigMap # -- Name of the Secret or ConfigMap that contains the configuration (used for naming even if config is internal). 
externalConfigSecretName: '{{ include "tempo.resourceName" (dict "ctx" . "component" "config") }}' # -- Name of the Secret or ConfigMap that contains the runtime configuration (used for naming even if config is internal). externalRuntimeConfigName: '{{ include "tempo.resourceName" (dict "ctx" . "component" "runtime") }}' # -- When 'useExternalConfig' is true, then changing 'externalConfigVersion' triggers restart of services - otherwise changes to the configuration cause a restart. externalConfigVersion: '0' # -- If true, Tempo will report anonymous usage data about the shape of a deployment to Grafana Labs reportingEnabled: true tempo: image: # -- The Docker registry registry: docker.io # -- Optional list of imagePullSecrets. Overrides `global.image.pullSecrets` pullSecrets: [] # -- Docker image repository repository: grafana/tempo # -- Overrides the image tag whose default is the chart's appVersion tag: null pullPolicy: IfNotPresent readinessProbe: httpGet: path: /ready port: http-metrics initialDelaySeconds: 30 timeoutSeconds: 1 # -- Global labels for all tempo pods podLabels: {} # -- Common annotations for all pods podAnnotations: {} # -- SecurityContext holds container-level security attributes and common container settings securityContext: runAsNonRoot: true runAsUser: 1000 runAsGroup: 1000 allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true # -- podSecurityContext holds pod-level security attributes and common container settings podSecurityContext: fsGroup: 1000 # -- Structured tempo configuration structuredConfig: {} # -- Memberlist service configuration. memberlist: # -- Adds the appProtocol field to the memberlist service. This allows memberlist to work with istio protocol selection. Set the optional service protocol. Ex: "tcp", "http" or "https". appProtocol: null serviceAccount: # -- Specifies whether a ServiceAccount should be created create: true # -- The name of the ServiceAccount to use. 
# If not set and create is true, a name is generated using the fullname template name: null # -- Image pull secrets for the service account imagePullSecrets: [] # -- Annotations for the service account annotations: {} automountServiceAccountToken: false rbac: # -- Specifies whether RBAC manifests should be created create: false # -- Specifies whether a PodSecurityPolicy should be created pspEnabled: false # Configuration for the ingester ingester: # -- Annotations for the ingester StatefulSet annotations: {} # -- Number of replicas for the ingester replicas: 3 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld autoscaling: # -- Enable autoscaling for the ingester. WARNING: Autoscaling ingesters can result in lost data. Only do this if you know what you're doing. enabled: false # -- Minimum autoscaling replicas for the ingester minReplicas: 2 # -- Maximum autoscaling replicas for the ingester maxReplicas: 3 # -- Autoscaling behavior configuration for the ingester behavior: {} # -- Target CPU utilisation percentage for the ingester targetCPUUtilizationPercentage: 60 # -- Target memory utilisation percentage for the ingester targetMemoryUtilizationPercentage: image: # -- The Docker registry for the ingester image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the ingester image. Overrides `tempo.image.repository` repository: null # -- Docker image tag for the ingester image. 
Overrides `tempo.image.tag` tag: null # -- The name of the PriorityClass for ingester pods priorityClassName: null # -- Labels for ingester pods podLabels: {} # -- Annotations for ingester pods podAnnotations: {} # -- Additional CLI args for the ingester extraArgs: [] # -- Environment variables to add to the ingester pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the ingester pods extraEnvFrom: [] # -- Resource requests and limits for the ingester resources: {} # -- Grace period to allow the ingester to shutdown before it is killed. Especially for the ingestor, # this must be increased. It must be long enough so ingesters can be gracefully shutdown flushing/transferring # all data and to successfully leave the member ring on shutdown. terminationGracePeriodSeconds: 300 # -- topologySpread for ingester pods. Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "ingester") | nindent 6 }} # -- Affinity for ingester pods. Passed through `tpl` and, thus, to be configured as string # @default -- Soft node and soft zone anti-affinity affinity: | podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "ingester") | nindent 12 }} topologyKey: kubernetes.io/hostname - weight: 75 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . 
"component" "ingester") | nindent 12 }} topologyKey: topology.kubernetes.io/zone # -- Node selector for ingester pods nodeSelector: {} # -- Tolerations for ingester pods tolerations: [] # -- Extra volumes for ingester pods extraVolumeMounts: [] # -- Extra volumes for ingester deployment extraVolumes: [] persistence: # -- Enable creating PVCs which is required when using boltdb-shipper enabled: true # -- use emptyDir with ramdisk instead of PVC. **Please note that all data in ingester will be lost on pod restart** inMemory: false # -- Size of persistent or memory disk size: 50Gi # -- Storage class to be used. # If defined, storageClassName: . # If set to "-", storageClassName: "", which disables dynamic provisioning. # If empty or set to null, no storageClassName spec is # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). storageClass: "openebs-hostpath" # -- Annotations for ingester's persist volume claim annotations: {} config: # -- Number of copies of spans to store in the ingester ring replication_factor: 3 # -- Amount of time a trace must be idle before flushing it to the wal. trace_idle_period: null # -- How often to sweep all tenants and move traces from live -> wal -> completed blocks. flush_check_period: null # -- Maximum size of a block before cutting it max_block_bytes: null # -- Maximum length of time before cutting a block max_block_duration: null # -- Duration to keep blocks in the ingester after they have been flushed complete_block_timeout: null # -- Flush all traces to backend when ingester is stopped flush_all_on_shutdown: false service: # -- Annotations for ingester service annotations: {} # -- Adds the appProtocol field to the ingester service. This allows ingester to work with istio protocol selection. appProtocol: # -- Set the optional grpc service protocol. 
Ex: "grpc", "http2" or "https" grpc: null # Configuration for the metrics-generator metricsGenerator: # -- Specifies whether a metrics-generator should be deployed enabled: true # -- Annotations for the metrics-generator StatefulSet annotations: {} # -- Number of replicas for the metrics-generator replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld image: # -- The Docker registry for the metrics-generator image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the metrics-generator image. Overrides `tempo.image.repository` repository: null # -- Docker image tag for the metrics-generator image. Overrides `tempo.image.tag` tag: null # -- The name of the PriorityClass for metrics-generator pods priorityClassName: null # -- Labels for metrics-generator pods podLabels: {} # -- Annotations for metrics-generator pods podAnnotations: {} # -- Additional CLI args for the metrics-generator extraArgs: [] # -- Environment variables to add to the metrics-generator pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the metrics-generator pods extraEnvFrom: [] # -- Resource requests and limits for the metrics-generator resources: {} # -- Grace period to allow the metrics-generator to shutdown before it is killed. Especially for the ingestor, # this must be increased. It must be long enough so metrics-generators can be gracefully shutdown flushing/transferring # all data and to successfully leave the member ring on shutdown. terminationGracePeriodSeconds: 300 # -- topologySpread for metrics-generator pods. 
Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "metrics-generator") | nindent 6 }} # -- Affinity for metrics-generator pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "metrics-generator") | nindent 10 }} topologyKey: kubernetes.io/hostname preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "metrics-generator") | nindent 12 }} topologyKey: topology.kubernetes.io/zone # -- Node selector for metrics-generator pods nodeSelector: {} # -- Tolerations for metrics-generator pods tolerations: [] # -- The EmptyDir location where the /var/tempo will be mounted on. Defaults to local disk, can be set to memory. walEmptyDir: {} ## Here shows how to configure 1Gi memory as emptyDir. 
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#emptydirvolumesource-v1-core # medium: "Memory" # sizeLimit: 1Gi # -- Extra volumes for metrics-generator pods extraVolumeMounts: [] # -- Extra volumes for metrics-generator deployment extraVolumes: [] # -- Default ports ports: - name: grpc port: 9095 service: true - name: http-memberlist port: 7946 service: false - name: http-metrics port: 3100 service: true # -- More information on configuration: https://grafana.com/docs/tempo/latest/configuration/#metrics-generator config: registry: collection_interval: 15s external_labels: {} stale_duration: 15m processor: # -- For processors to be enabled and generate metrics, pass the names of the processors to overrides.metrics_generator_processors value like [service-graphs, span-metrics] service_graphs: # -- Additional dimensions to add to the metrics. Dimensions are searched for in the # -- resource and span attributes and are added to the metrics if present. dimensions: [] histogram_buckets: [0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8] max_items: 10000 wait: 10s workers: 10 span_metrics: # -- Additional dimensions to add to the metrics along with the default dimensions. # -- Dimensions are searched for in the resource and span attributes and are added to the metrics if present. dimensions: [] histogram_buckets: [0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.02, 2.05, 4.10] storage: path: /var/tempo/wal wal: remote_write_flush_deadline: 1m # -- A list of remote write endpoints. # -- https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write remote_write: - name: prometheus url: "http://prometheus-server.observe.svc.cluster.local:80/api/v1/write" metrics_ingestion_time_range_slack: 30s service: # -- Annotations for Metrics Generator service annotations: {} # -- Adds the appProtocol field to the metricsGenerator service. This allows metricsGenerator to work with istio protocol selection. 
appProtocol: # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" grpc: null # Configuration for the distributor distributor: # -- Number of replicas for the distributor replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld autoscaling: # -- Enable autoscaling for the distributor enabled: false # -- Minimum autoscaling replicas for the distributor minReplicas: 1 # -- Maximum autoscaling replicas for the distributor maxReplicas: 3 # -- Autoscaling behavior configuration for the distributor behavior: {} # -- Target CPU utilisation percentage for the distributor targetCPUUtilizationPercentage: 60 # -- Target memory utilisation percentage for the distributor targetMemoryUtilizationPercentage: image: # -- The Docker registry for the ingester image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the ingester image. Overrides `tempo.image.repository` repository: null # -- Docker image tag for the ingester image. Overrides `tempo.image.tag` tag: null service: # -- Annotations for distributor service annotations: {} # -- Labels for distributor service labels: {} # -- Type of service for the distributor type: ClusterIP # -- If type is LoadBalancer you can assign the IP to the LoadBalancer loadBalancerIP: '' # -- If type is LoadBalancer limit incoming traffic from IPs. 
loadBalancerSourceRanges: [] serviceDiscovery: # -- Annotations for distributorDiscovery service annotations: {} # -- Labels for distributorDiscovery service labels: {} # -- The name of the PriorityClass for distributor pods priorityClassName: null # -- Labels for distributor pods podLabels: {} # -- Annotations for distributor pods podAnnotations: {} # -- Additional CLI args for the distributor extraArgs: [] # -- Environment variables to add to the distributor pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the distributor pods extraEnvFrom: [] # -- Resource requests and limits for the distributor resources: {} # -- Grace period to allow the distributor to shutdown before it is killed terminationGracePeriodSeconds: 30 # -- topologySpread for distributor pods. Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "distributor") | nindent 6 }} # -- Affinity for distributor pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "distributor") | nindent 10 }} topologyKey: kubernetes.io/hostname preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . 
"component" "distributor") | nindent 12 }} topologyKey: topology.kubernetes.io/zone # -- Node selector for distributor pods nodeSelector: {} # -- Tolerations for distributor pods tolerations: [] # -- Extra volumes for distributor pods extraVolumeMounts: [] # -- Extra volumes for distributor deployment extraVolumes: [] config: # -- Enable to log every received trace id to help debug ingestion # -- WARNING: Deprecated. Use log_received_spans instead. log_received_traces: null # -- Enable to log every received span to help debug ingestion or calculate span error distributions using the logs log_received_spans: enabled: false include_all_attributes: false filter_by_status_error: false # -- Disables write extension with inactive ingesters extend_writes: null # -- Adds the appProtocol field to the distributor service. This allows distributor to work with istio protocol selection. appProtocol: # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" grpc: null # Configuration for the compactor compactor: # -- Number of replicas for the compactor replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld image: # -- The Docker registry for the compactor image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the compactor image. Overrides `tempo.image.repository` repository: null # -- Docker image tag for the compactor image. 
Overrides `tempo.image.tag` tag: null # -- The name of the PriorityClass for compactor pods priorityClassName: null # -- Labels for compactor pods podLabels: {} # -- Annotations for compactor pods podAnnotations: {} # -- Additional CLI args for the compactor extraArgs: [] # -- Environment variables to add to the compactor pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the compactor pods extraEnvFrom: [] # -- Resource requests and limits for the compactor resources: {} # -- Grace period to allow the compactor to shutdown before it is killed terminationGracePeriodSeconds: 30 # -- Node selector for compactor pods nodeSelector: {} # -- Tolerations for compactor pods tolerations: [] # -- Extra volumes for compactor pods extraVolumeMounts: [] # -- Extra volumes for compactor deployment extraVolumes: [] config: compaction: # -- Duration to keep blocks block_retention: 48h # Duration to keep blocks that have been compacted elsewhere compacted_block_retention: 1h # -- Blocks in this time window will be compacted together compaction_window: 1h # -- Amount of data to buffer from input blocks v2_in_buffer_bytes: 5242880 # -- Flush data to backend when buffer is this large v2_out_buffer_bytes: 20971520 # -- Maximum number of traces in a compacted block. WARNING: Deprecated. Use max_block_bytes instead. 
max_compaction_objects: 6000000 # -- Maximum size of a compacted block in bytes max_block_bytes: 107374182400 # -- Number of tenants to process in parallel during retention retention_concurrency: 10 # -- Number of traces to buffer in memory during compaction v2_prefetch_traces_count: 1000 # -- The maximum amount of time to spend compacting a single tenant before moving to the next max_time_per_tenant: 5m # -- The time between compaction cycles compaction_cycle: 30s service: # -- Annotations for compactor service annotations: {} dnsConfigOverides: enabled: false dnsConfig: options: - name: ndots value: "3" # This is required for Azure Kubernetes Service (AKS) https://github.com/grafana/tempo/issues/1462 # Configuration for the querier querier: # -- Number of replicas for the querier replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld autoscaling: # -- Enable autoscaling for the querier enabled: false # -- Minimum autoscaling replicas for the querier minReplicas: 1 # -- Maximum autoscaling replicas for the querier maxReplicas: 3 # -- Autoscaling behavior configuration for the querier behavior: {} # -- Target CPU utilisation percentage for the querier targetCPUUtilizationPercentage: 60 # -- Target memory utilisation percentage for the querier targetMemoryUtilizationPercentage: image: # -- The Docker registry for the querier image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the querier image. Overrides `tempo.image.repository` repository: null # -- Docker image tag for the querier image. 
Overrides `tempo.image.tag` tag: null # -- The name of the PriorityClass for querier pods priorityClassName: null # -- Labels for querier pods podLabels: {} # -- Annotations for querier pods podAnnotations: {} # -- Additional CLI args for the querier extraArgs: [] # -- Environment variables to add to the querier pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the querier pods extraEnvFrom: [] # -- Resource requests and limits for the querier resources: {} # -- Grace period to allow the querier to shutdown before it is killed terminationGracePeriodSeconds: 30 # -- topologySpread for querier pods. Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "querier") | nindent 6 }} # -- Affinity for querier pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "querier" "memberlist" true) | nindent 10 }} topologyKey: kubernetes.io/hostname preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . 
"component" "querier" "memberlist" true) | nindent 12 }} topologyKey: topology.kubernetes.io/zone # -- Node selector for querier pods nodeSelector: {} # -- Tolerations for querier pods tolerations: [] # -- Extra volumes for querier pods extraVolumeMounts: [] # -- Extra volumes for querier deployment extraVolumes: [] config: frontend_worker: # -- grpc client configuration grpc_client_config: {} trace_by_id: # -- Timeout for trace lookup requests query_timeout: 10s search: # -- Timeout for search requests query_timeout: 30s # -- If search_external_endpoints is set then the querier will primarily act as a proxy for whatever serverless backend you have configured. This setting allows the operator to have the querier prefer itself for a configurable number of subqueries. prefer_self: 10 # -- If set to a non-zero value a second request will be issued at the provided duration. Recommended to be set to p99 of external search requests to reduce long tail latency. external_hedge_requests_at: 8s # -- The maximum number of requests to execute when hedging. Requires hedge_requests_at to be set. external_hedge_requests_up_to: 2 # -- A list of external endpoints that the querier will use to offload backend search requests external_endpoints: [] # -- The serverless backend to use. The default value of "" omits # -- credentials when querying the external backend. external_backend: "" # -- Google Cloud Run configuration. Will be used only if the value of # -- external_backend is "google_cloud_run". google_cloud_run: {} # -- This value controls the overall number of simultaneous subqueries that the querier will service at once. It does not distinguish between the types of queries. max_concurrent_queries: 20 service: # -- Annotations for querier service annotations: {} # -- Adds the appProtocol field to the querier service. This allows querier to work with istio protocol selection. appProtocol: # -- Set the optional grpc service protocol. 
Ex: "grpc", "http2" or "https" grpc: null # Configuration for the query-frontend queryFrontend: query: # -- Required for grafana version <7.5 for compatibility with jaeger-ui. Doesn't work on ARM arch enabled: false image: # -- The Docker registry for the query-frontend image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the query-frontend image. Overrides `tempo.image.repository` repository: grafana/tempo-query # -- Docker image tag for the query-frontend image. Overrides `tempo.image.tag` tag: null # -- Resource requests and limits for the query resources: {} # -- Additional CLI args for tempo-query pods extraArgs: [] # -- Environment variables to add to the tempo-query pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the tempo-query pods extraEnvFrom: [] # -- Extra volumes for tempo-query pods extraVolumeMounts: [] # -- Extra volumes for tempo-query deployment extraVolumes: [] config: | backend: 127.0.0.1:3100 # -- Number of replicas for the query-frontend replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld config: # -- Number of times to retry a request sent to a querier max_retries: 2 search: # -- The number of concurrent jobs to execute when searching the backend concurrent_jobs: 1000 # -- The target number of bytes for each job to handle when performing a backend search target_bytes_per_job: 104857600 # -- Trace by ID lookup configuration trace_by_id: # -- The number of shards to split a trace by id query into. query_shards: 50 # -- If set to a non-zero value, a second request will be issued at the provided duration. Recommended to be set to p99 of search requests to reduce long-tail latency. hedge_requests_at: 2s # -- The maximum number of requests to execute when hedging. Requires hedge_requests_at to be set. Must be greater than 0. 
hedge_requests_up_to: 2 autoscaling: # -- Enable autoscaling for the query-frontend enabled: false # -- Minimum autoscaling replicas for the query-frontend minReplicas: 1 # -- Maximum autoscaling replicas for the query-frontend maxReplicas: 3 # -- Autoscaling behavior configuration for the query-frontend behavior: {} # -- Target CPU utilisation percentage for the query-frontend targetCPUUtilizationPercentage: 60 # -- Target memory utilisation percentage for the query-frontend targetMemoryUtilizationPercentage: image: # -- The Docker registry for the query-frontend image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the query-frontend image. Overrides `tempo.image.repository` repository: null # -- Docker image tag for the query-frontend image. Overrides `tempo.image.tag` tag: null service: # -- Port of the query-frontend service port: 16686 # -- Annotations for queryFrontend service annotations: {} # -- Labels for queryFrontend service labels: {} # -- Type of service for the queryFrontend type: ClusterIP # -- If type is LoadBalancer you can assign the IP to the LoadBalancer loadBalancerIP: "" # -- If type is LoadBalancer limit incoming traffic from IPs. loadBalancerSourceRanges: [] serviceDiscovery: # -- Annotations for queryFrontendDiscovery service annotations: {} # -- Labels for queryFrontendDiscovery service labels: {} ingress: # -- Specifies whether an ingress for the Jaeger should be created enabled: false # -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 # ingressClassName: nginx # -- Annotations for the Jaeger ingress annotations: {} # -- Hosts configuration for the Jaeger ingress hosts: - host: query.tempo.example.com paths: - path: / # -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.) 
might also be required by some Ingress Controllers # pathType: Prefix # -- TLS configuration for the Jaeger ingress tls: - secretName: tempo-query-tls hosts: - query.tempo.example.com # -- The name of the PriorityClass for query-frontend pods priorityClassName: null # -- Labels for queryFrontend pods podLabels: {} # -- Annotations for query-frontend pods podAnnotations: {} # -- Additional CLI args for the query-frontend extraArgs: [] # -- Environment variables to add to the query-frontend pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the query-frontend pods extraEnvFrom: [] # -- Resource requests and limits for the query-frontend resources: {} # -- Grace period to allow the query-frontend to shutdown before it is killed terminationGracePeriodSeconds: 30 # -- topologySpread for query-frontend pods. Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "query-frontend") | nindent 6 }} # -- Affinity for query-frontend pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "query-frontend") | nindent 10 }} topologyKey: kubernetes.io/hostname preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . 
"component" "query-frontend") | nindent 12 }} topologyKey: topology.kubernetes.io/zone # -- Node selector for query-frontend pods nodeSelector: {} # -- Tolerations for query-frontend pods tolerations: [] # -- Extra volumes for query-frontend pods extraVolumeMounts: [] # -- Extra volumes for query-frontend deployment extraVolumes: [] # -- Adds the appProtocol field to the queryFrontend service. This allows queryFrontend to work with istio protocol selection. appProtocol: # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" grpc: null # Configuration for the federation-frontend # Can only be enabled if enterprise.enabled is true - requires license. enterpriseFederationFrontend: # -- Specifies whether a federation-frontend should be deployed enabled: false # -- Number of replicas for the federation-frontend replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld # proxy_targets: # - name: own-data-center # url: http://get/tempo # - name: grafana-cloud # url: https://tempo-us-central1.grafana.net/tempo # basic_auth: # username: # password: proxy_targets: [] autoscaling: # -- Enable autoscaling for the federation-frontend enabled: false # -- Minimum autoscaling replicas for the federation-frontend minReplicas: 1 # -- Maximum autoscaling replicas for the federation-frontend maxReplicas: 3 # -- Target CPU utilisation percentage for the federation-frontend targetCPUUtilizationPercentage: 60 # -- Target memory utilisation percentage for the federation-frontend targetMemoryUtilizationPercentage: image: # -- The Docker registry for the federation-frontend image. Overrides `tempo.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `tempo.image.pullSecrets` pullSecrets: [] # -- Docker image repository for the federation-frontend image. Overrides `tempo.image.repository` repository: null # -- Docker image tag for the federation-frontend image. 
Overrides `tempo.image.tag` tag: null service: # -- Port of the federation-frontend service port: 3100 # -- Annotations for enterpriseFederationFrontend service annotations: {} # -- Type of service for the enterpriseFederationFrontend type: ClusterIP # -- If type is LoadBalancer you can assign the IP to the LoadBalancer loadBalancerIP: "" # -- If type is LoadBalancer limit incoming traffic from IPs. loadBalancerSourceRanges: [] # -- The name of the PriorityClass for federation-frontend pods priorityClassName: null # -- Labels for enterpriseFederationFrontend pods podLabels: {} # -- Annotations for federation-frontend pods podAnnotations: {} # -- Additional CLI args for the federation-frontend extraArgs: [] # -- Environment variables to add to the federation-frontend pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the federation-frontend pods extraEnvFrom: [] # -- Resource requests and limits for the federation-frontend resources: {} # -- Grace period to allow the federation-frontend to shutdown before it is killed terminationGracePeriodSeconds: 30 # -- topologySpread for federation-frontend pods. Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: failure-domain.beta.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "federation-frontend") | nindent 6 }} # -- Affinity for federation-frontend pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . 
"component" "federation-frontend") | nindent 10 }} topologyKey: kubernetes.io/hostname preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "federation-frontend") | nindent 12 }} topologyKey: failure-domain.beta.kubernetes.io/zone # -- Node selector for federation-frontend pods nodeSelector: {} # -- Tolerations for federation-frontend pods tolerations: [] # -- Extra volumes for federation-frontend pods extraVolumeMounts: [] # -- Extra volumes for federation-frontend deployment extraVolumes: [] multitenancyEnabled: false traces: jaeger: grpc: # -- Enable Tempo to ingest Jaeger GRPC traces enabled: false # -- Jaeger GRPC receiver config receiverConfig: {} thriftBinary: # -- Enable Tempo to ingest Jaeger Thrift Binary traces enabled: false # -- Jaeger Thrift Binary receiver config receiverConfig: {} thriftCompact: # -- Enable Tempo to ingest Jaeger Thrift Compact traces enabled: false # -- Jaeger Thrift Compact receiver config receiverConfig: {} thriftHttp: # -- Enable Tempo to ingest Jaeger Thrift HTTP traces enabled: false # -- Jaeger Thrift HTTP receiver config receiverConfig: {} zipkin: # -- Enable Tempo to ingest Zipkin traces enabled: false # -- Zipkin receiver config receiverConfig: {} otlp: http: # -- Enable Tempo to ingest Open Telemetry HTTP traces enabled: true # -- HTTP receiver advanced config receiverConfig: {} grpc: # -- Enable Tempo to ingest Open Telemetry GRPC traces enabled: true # -- GRPC receiver advanced config receiverConfig: {} opencensus: # -- Enable Tempo to ingest Open Census traces enabled: false # -- Open Census receiver config receiverConfig: {} # -- Enable Tempo to ingest traces from Kafka. Reference: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/kafkareceiver kafka: {} # -- Memberlist configuration. 
Please refer to https://grafana.com/docs/tempo/latest/configuration/#memberlist memberlist: node_name: "" randomize_node_name: true stream_timeout: "10s" retransmit_factor: 2 pull_push_interval: "30s" gossip_interval: "1s" gossip_nodes: 2 gossip_to_dead_nodes_time: "30s" min_join_backoff: "1s" max_join_backoff: "1m" max_join_retries: 10 abort_if_cluster_join_fails: false rejoin_interval: "0s" left_ingesters_timeout: "5m" leave_timeout: "5s" bind_addr: [] bind_port: 7946 packet_dial_timeout: "5s" packet_write_timeout: "5s" # -- Config file contents for Tempo distributed. Passed through the `tpl` function to allow templating # @default -- See values.yaml config: | multitenancy_enabled: {{ .Values.multitenancyEnabled }} usage_report: reporting_enabled: {{ .Values.reportingEnabled }} {{- if .Values.enterprise.enabled }} license: path: "/license/license.jwt" admin_api: leader_election: enabled: true ring: kvstore: store: "memberlist" auth: type: enterprise http_api_prefix: {{get .Values.tempo.structuredConfig "http_api_prefix"}} admin_client: storage: backend: {{.Values.storage.admin.backend}} {{- if eq .Values.storage.admin.backend "s3"}} s3: {{- toYaml .Values.storage.admin.s3 | nindent 6}} {{- end}} {{- if eq .Values.storage.admin.backend "gcs"}} gcs: {{- toYaml .Values.storage.admin.gcs | nindent 6}} {{- end}} {{- if eq .Values.storage.admin.backend "azure"}} azure: {{- toYaml .Values.storage.admin.azure | nindent 6}} {{- end}} {{- if eq .Values.storage.admin.backend "swift"}} swift: {{- toYaml .Values.storage.admin.swift | nindent 6}} {{- end}} {{- if eq .Values.storage.admin.backend "filesystem"}} filesystem: {{- toYaml .Values.storage.admin.filesystem | nindent 6}} {{- end}} {{- end }} {{- if and .Values.enterprise.enabled .Values.enterpriseGateway.useDefaultProxyURLs }} gateway: proxy: admin_api: url: http://{{ template "tempo.fullname" . }}-admin-api.{{ .Release.Namespace }}.svc:{{ include "tempo.serverHttpListenPort" . 
}} compactor: url: http://{{ template "tempo.fullname" . }}-compactor.{{ .Release.Namespace }}.svc:{{ include "tempo.serverHttpListenPort" . }} default: url: http://{{ template "tempo.fullname" . }}-admin-api.{{ .Release.Namespace }}.svc:{{ include "tempo.serverHttpListenPort" . }} distributor: url: http://{{ template "tempo.fullname" . }}-distributor.{{ .Release.Namespace }}.svc:{{ include "tempo.serverHttpListenPort" . }} otlp/grpc: url: h2c://{{ template "tempo.fullname" . }}-distributor.{{ .Release.Namespace }}.svc:4317 otlp/http: url: http://{{ template "tempo.fullname" . }}-distributor.{{ .Release.Namespace }}.svc:4318 ingester: url: http://{{ template "tempo.fullname" . }}-ingester.{{ .Release.Namespace }}.svc:{{ include "tempo.serverHttpListenPort" . }} querier: url: http://{{ template "tempo.fullname" . }}-querier.{{ .Release.Namespace }}.svc:{{ include "tempo.serverHttpListenPort" . }} query_frontend: url: http://{{ template "tempo.fullname" . }}-query-frontend.{{ .Release.Namespace }}.svc:{{ include "tempo.serverHttpListenPort" . 
}}{{get .Values.tempo.structuredConfig "http_api_prefix"}} {{else}} {{- if and .Values.enterprise.enabled .Values.enterpriseGateway.proxy }} gateway: proxy: {{- toYaml .Values.enterpriseGateway.proxy | nindent 6 }} {{- end }} {{- end }} compactor: compaction: block_retention: {{ .Values.compactor.config.compaction.block_retention }} compacted_block_retention: {{ .Values.compactor.config.compaction.compacted_block_retention }} compaction_window: {{ .Values.compactor.config.compaction.compaction_window }} v2_in_buffer_bytes: {{ .Values.compactor.config.compaction.v2_in_buffer_bytes }} v2_out_buffer_bytes: {{ .Values.compactor.config.compaction.v2_out_buffer_bytes }} max_compaction_objects: {{ .Values.compactor.config.compaction.max_compaction_objects }} max_block_bytes: {{ .Values.compactor.config.compaction.max_block_bytes }} retention_concurrency: {{ .Values.compactor.config.compaction.retention_concurrency }} v2_prefetch_traces_count: {{ .Values.compactor.config.compaction.v2_prefetch_traces_count }} max_time_per_tenant: {{ .Values.compactor.config.compaction.max_time_per_tenant }} compaction_cycle: {{ .Values.compactor.config.compaction.compaction_cycle }} ring: kvstore: store: memberlist {{- if and .Values.enterprise.enabled .Values.enterpriseFederationFrontend.enabled }} federation: proxy_targets: {{- toYaml .Values.enterpriseFederationFrontend.proxy_targets | nindent 6 }} {{- end }} {{- if .Values.metricsGenerator.enabled }} metrics_generator: ring: kvstore: store: memberlist processor: {{- toYaml .Values.metricsGenerator.config.processor | nindent 6 }} storage: {{- toYaml .Values.metricsGenerator.config.storage | nindent 6 }} registry: {{- toYaml .Values.metricsGenerator.config.registry | nindent 6 }} metrics_ingestion_time_range_slack: {{ .Values.metricsGenerator.config.metrics_ingestion_time_range_slack }} {{- end }} distributor: ring: kvstore: store: memberlist receivers: {{- if or (.Values.traces.jaeger.thriftCompact.enabled) 
(.Values.traces.jaeger.thriftBinary.enabled) (.Values.traces.jaeger.thriftHttp.enabled) (.Values.traces.jaeger.grpc.enabled) }} jaeger: protocols: {{- if .Values.traces.jaeger.thriftCompact.enabled }} thrift_compact: {{- $mergedJaegerThriftCompactConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:6831") .Values.traces.jaeger.thriftCompact.receiverConfig }} {{- toYaml $mergedJaegerThriftCompactConfig | nindent 10 }} {{- end }} {{- if .Values.traces.jaeger.thriftBinary.enabled }} thrift_binary: {{- $mergedJaegerThriftBinaryConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:6832") .Values.traces.jaeger.thriftBinary.receiverConfig }} {{- toYaml $mergedJaegerThriftBinaryConfig | nindent 10 }} {{- end }} {{- if .Values.traces.jaeger.thriftHttp.enabled }} thrift_http: {{- $mergedJaegerThriftHttpConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:14268") .Values.traces.jaeger.thriftHttp.receiverConfig }} {{- toYaml $mergedJaegerThriftHttpConfig | nindent 10 }} {{- end }} {{- if .Values.traces.jaeger.grpc.enabled }} grpc: {{- $mergedJaegerGrpcConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:14250") .Values.traces.jaeger.grpc.receiverConfig }} {{- toYaml $mergedJaegerGrpcConfig | nindent 10 }} {{- end }} {{- end }} {{- if .Values.traces.zipkin.enabled }} zipkin: {{- $mergedZipkinReceiverConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:9411") .Values.traces.zipkin.receiverConfig }} {{- toYaml $mergedZipkinReceiverConfig | nindent 6 }} {{- end }} {{- if or (.Values.traces.otlp.http.enabled) (.Values.traces.otlp.grpc.enabled) }} otlp: protocols: {{- if .Values.traces.otlp.http.enabled }} http: {{- $mergedOtlpHttpReceiverConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:4318") .Values.traces.otlp.http.receiverConfig }} {{- toYaml $mergedOtlpHttpReceiverConfig | nindent 10 }} {{- end }} {{- if .Values.traces.otlp.grpc.enabled }} grpc: {{- $mergedOtlpGrpcReceiverConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:4317") 
.Values.traces.otlp.grpc.receiverConfig }} {{- toYaml $mergedOtlpGrpcReceiverConfig | nindent 10 }} {{- end }} {{- end }} {{- if .Values.traces.opencensus.enabled }} opencensus: {{- $mergedOpencensusReceiverConfig := mustMergeOverwrite (dict "endpoint" "0.0.0.0:55678") .Values.traces.opencensus.receiverConfig }} {{- toYaml $mergedOpencensusReceiverConfig | nindent 6 }} {{- end }} {{- if .Values.traces.kafka }} kafka: {{- toYaml .Values.traces.kafka | nindent 6 }} {{- end }} {{- if or .Values.distributor.config.log_received_traces .Values.distributor.config.log_received_spans.enabled }} log_received_spans: enabled: {{ or .Values.distributor.config.log_received_traces .Values.distributor.config.log_received_spans.enabled }} include_all_attributes: {{ .Values.distributor.config.log_received_spans.include_all_attributes }} filter_by_status_error: {{ .Values.distributor.config.log_received_spans.filter_by_status_error }} {{- end }} {{- if .Values.distributor.config.extend_writes }} extend_writes: {{ .Values.distributor.config.extend_writes }} {{- end }} querier: frontend_worker: frontend_address: {{ include "tempo.resourceName" (dict "ctx" . 
"component" "query-frontend-discovery") }}:9095 {{- if .Values.querier.config.frontend_worker.grpc_client_config }} grpc_client_config: {{- toYaml .Values.querier.config.frontend_worker.grpc_client_config | nindent 6 }} {{- end }} trace_by_id: query_timeout: {{ .Values.querier.config.trace_by_id.query_timeout }} search: external_endpoints: {{- toYaml .Values.querier.config.search.external_endpoints | nindent 6 }} query_timeout: {{ .Values.querier.config.search.query_timeout }} prefer_self: {{ .Values.querier.config.search.prefer_self }} external_hedge_requests_at: {{ .Values.querier.config.search.external_hedge_requests_at }} external_hedge_requests_up_to: {{ .Values.querier.config.search.external_hedge_requests_up_to }} external_backend: {{ .Values.querier.config.search.external_backend }} {{- if .Values.querier.config.search.google_cloud_run }} google_cloud_run: {{- toYaml .Values.querier.config.search.google_cloud_run | nindent 6 }} {{- end }} max_concurrent_queries: {{ .Values.querier.config.max_concurrent_queries }} query_frontend: max_retries: {{ .Values.queryFrontend.config.max_retries }} search: target_bytes_per_job: {{ .Values.queryFrontend.config.search.target_bytes_per_job }} concurrent_jobs: {{ .Values.queryFrontend.config.search.concurrent_jobs }} trace_by_id: query_shards: {{ .Values.queryFrontend.config.trace_by_id.query_shards }} hedge_requests_at: {{ .Values.queryFrontend.config.trace_by_id.hedge_requests_at }} hedge_requests_up_to: {{ .Values.queryFrontend.config.trace_by_id.hedge_requests_up_to }} ingester: lifecycler: ring: replication_factor: {{ .Values.ingester.config.replication_factor }} kvstore: store: memberlist tokens_file_path: /var/tempo/tokens.json {{- if .Values.ingester.config.trace_idle_period }} trace_idle_period: {{ .Values.ingester.config.trace_idle_period }} {{- end }} {{- if .Values.ingester.config.flush_check_period }} flush_check_period: {{ .Values.ingester.config.flush_check_period }} {{- end }} {{- if 
.Values.ingester.config.max_block_bytes }} max_block_bytes: {{ .Values.ingester.config.max_block_bytes }} {{- end }} {{- if .Values.ingester.config.max_block_duration }} max_block_duration: {{ .Values.ingester.config.max_block_duration }} {{- end }} {{- if .Values.ingester.config.complete_block_timeout }} complete_block_timeout: {{ .Values.ingester.config.complete_block_timeout }} {{- end }} {{- if .Values.ingester.config.flush_all_on_shutdown }} flush_all_on_shutdown: {{ .Values.ingester.config.flush_all_on_shutdown }} {{- end }} memberlist: {{- with .Values.memberlist }} {{- toYaml . | nindent 2 }} {{- end }} join_members: - dns+{{ include "tempo.fullname" . }}-gossip-ring:{{ .Values.memberlist.bind_port }} overrides: {{- toYaml .Values.global_overrides | nindent 2 }} {{- if .Values.metricsGenerator.enabled }} metrics_generator_processors: {{- range .Values.global_overrides.metrics_generator_processors }} - {{ . }} {{- end }} {{- end }} server: http_listen_port: {{ .Values.server.httpListenPort }} log_level: {{ .Values.server.logLevel }} log_format: {{ .Values.server.logFormat }} grpc_server_max_recv_msg_size: {{ .Values.server.grpc_server_max_recv_msg_size }} grpc_server_max_send_msg_size: {{ .Values.server.grpc_server_max_send_msg_size }} http_server_read_timeout: {{ .Values.server.http_server_read_timeout }} http_server_write_timeout: {{ .Values.server.http_server_write_timeout }} storage: trace: {{- if .Values.storage.trace.block.version }} block: version: {{.Values.storage.trace.block.version}} {{- end }} pool: max_workers: {{ .Values.storage.trace.pool.max_workers }} queue_depth: {{ .Values.storage.trace.pool.queue_depth }} backend: {{.Values.storage.trace.backend}} {{- if eq .Values.storage.trace.backend "s3"}} s3: {{- toYaml .Values.storage.trace.s3 | nindent 6}} {{- end }} {{- if eq .Values.storage.trace.backend "gcs"}} gcs: {{- toYaml .Values.storage.trace.gcs | nindent 6}} {{- end }} {{- if eq .Values.storage.trace.backend "azure"}} azure: {{- toYaml 
.Values.storage.trace.azure | nindent 6}} {{- end }} blocklist_poll: 5m local: path: /var/tempo/traces wal: path: /var/tempo/wal {{- if .Values.memcached.enabled }} cache: memcached memcached: consistent_hash: true host: {{ include "tempo.fullname" . }}-memcached service: memcached-client timeout: 500ms {{- end }} # Set Tempo server configuration # Refers to https://grafana.com/docs/tempo/latest/configuration/#server server: # -- HTTP server listen host httpListenPort: 3100 # -- Log level. Can be set to trace, debug, info (default), warn, error, fatal, panic logLevel: info # -- Log format. Can be set to logfmt (default) or json. logFormat: logfmt # -- Max gRPC message size that can be received grpc_server_max_recv_msg_size: 4194304 # -- Max gRPC message size that can be sent grpc_server_max_send_msg_size: 4194304 # -- Read timeout for HTTP server http_server_read_timeout: 30s # -- Write timeout for HTTP server http_server_write_timeout: 30s # To configure a different storage backend instead of local storage: # storage: # trace: # backend: azure # azure: # container_name: # storage_account_name: # storage_account_key: storage: trace: # Settings for the block storage backend and buckets. block: # -- The supported block versions are specified here https://grafana.com/docs/tempo/latest/configuration/parquet/ version: null # -- The supported storage backends are gcs, s3 and azure, as specified in https://grafana.com/docs/tempo/latest/configuration/#storage backend: s3 s3: access_key: 'LHy8wIkIDB602aH34nGb' secret_key: 'coxPe9FpiYP6QmWDRsTMiNx4S7JBidd9ThwjZm53' bucket: 'tempo' endpoint: 'minio.base.svc.cluster.local:9000' insecure: true # The worker pool is used primarily when finding traces by id, but is also used by other. pool: # -- Total number of workers pulling jobs from the queue max_workers: 400 # -- Length of job queue. 
important for the querier as it queues a job for every block it has to search
Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "memcached") | nindent 6 }} # -- Affinity for memcached pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "memcached") | nindent 10 }} topologyKey: kubernetes.io/hostname preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "memcached") | nindent 12 }} topologyKey: topology.kubernetes.io/zone service: # -- Annotations for memcached service annotations: {} memcachedExporter: # -- Specifies whether the Memcached Exporter should be enabled enabled: false # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld image: # -- The Docker registry for the Memcached Exporter image. Overrides `global.image.registry` registry: null # -- Optional list of imagePullSecrets. 
Overrides `global.image.pullSecrets` pullSecrets: [] # -- Memcached Exporter Docker image repository repository: prom/memcached-exporter # -- Memcached Exporter Docker image tag tag: v0.8.0 # -- Memcached Exporter Docker image pull policy pullPolicy: IfNotPresent # -- Memcached Exporter resource requests and limits resources: {} metaMonitoring: # ServiceMonitor configuration serviceMonitor: # -- If enabled, ServiceMonitor resources for Prometheus Operator are created enabled: false # -- Alternative namespace for ServiceMonitor resources namespace: null # -- Namespace selector for ServiceMonitor resources namespaceSelector: {} # -- ServiceMonitor annotations annotations: {} # -- Additional ServiceMonitor labels labels: {} # -- ServiceMonitor scrape interval interval: null # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s) scrapeTimeout: null # -- ServiceMonitor relabel configs to apply to samples before scraping # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig relabelings: [] # -- ServiceMonitor metric relabel configs to apply to samples before ingestion # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint metricRelabelings: [] # -- ServiceMonitor will use http by default, but you can pick https as well scheme: http # -- ServiceMonitor will use these tlsConfig settings to make the health check requests tlsConfig: null # metaMonitoringAgent configures the built in Grafana Agent that can scrape metrics and logs and send them to a local or remote destination grafanaAgent: # -- Controls whether to create PodLogs, MetricsInstance, LogsInstance, and GrafanaAgent CRs to scrape the # ServiceMonitors of the chart and ship metrics and logs to the remote endpoints below. # Note that you need to configure serviceMonitor in order to have some metrics available. enabled: false # -- Controls whether to install the Grafana Agent Operator and its CRDs. 
# Note that helm will not install CRDs if this flag is enabled during an upgrade. # In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds installOperator: false logs: # -- Default destination for logs. The config here is translated to Promtail client # configuration to write logs to this Loki-compatible remote. Optional. remote: # -- Full URL for Loki push endpoint. Usually ends in /loki/api/v1/push url: '' auth: # -- Used to set X-Scope-OrgID header on requests. Usually not used in combination with username and password. tenantId: '' # -- Basic authentication username. Optional. username: '' # -- The value under key passwordSecretKey in this secret will be used as the basic authentication password. Required only if passwordSecretKey is set. passwordSecretName: '' # -- The value under this key in passwordSecretName will be used as the basic authentication password. Required only if passwordSecretName is set. passwordSecretKey: '' # -- Client configurations for the LogsInstance that will scrape Mimir pods. Follows the format of .remote. additionalClientConfigs: [] metrics: # -- Default destination for metrics. The config here is translated to remote_write # configuration to push metrics to this Prometheus-compatible remote. Optional. # Note that you need to configure serviceMonitor in order to have some metrics available. remote: # -- Full URL for Prometheus remote-write. Usually ends in /push url: '' # -- Used to add HTTP headers to remote-write requests. headers: {} auth: # -- Basic authentication username. Optional. username: '' # -- The value under key passwordSecretKey in this secret will be used as the basic authentication password. Required only if passwordSecretKey is set. passwordSecretName: '' # -- The value under this key in passwordSecretName will be used as the basic authentication password. Required only if passwordSecretName is set. 
passwordSecretKey: '' # -- Additional remote-write for the MetricsInstance that will scrape Mimir pods. Follows the format of .remote. additionalRemoteWriteConfigs: [] scrapeK8s: # -- When grafanaAgent.enabled and serviceMonitor.enabled, controls whether to create ServiceMonitors CRs # for cadvisor, kubelet, and kube-state-metrics. The scraped metrics are reduced to those pertaining to # Mimir pods only. enabled: true # -- Controls service discovery of kube-state-metrics. kubeStateMetrics: namespace: kube-system labelSelectors: app.kubernetes.io/name: kube-state-metrics # -- Sets the namespace of the resources. Leave empty or unset to use the same namespace as the Helm release. namespace: '' # -- Labels to add to all monitoring.grafana.com custom resources. # Does not affect the ServiceMonitors for kubernetes metrics; use serviceMonitor.labels for that. labels: {} # -- Annotations to add to all monitoring.grafana.com custom resources. # Does not affect the ServiceMonitors for kubernetes metrics; use serviceMonitor.annotations for that. 
annotations: {} # Rules for the Prometheus Operator prometheusRule: # -- If enabled, a PrometheusRule resource for Prometheus Operator is created enabled: false # -- Alternative namespace for the PrometheusRule resource namespace: null # -- PrometheusRule annotations annotations: {} # -- Additional PrometheusRule labels labels: {} # -- Contents of Prometheus rules file groups: [] # - name: loki-rules # rules: # - record: job:loki_request_duration_seconds_bucket:sum_rate # expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job) # - record: job_route:loki_request_duration_seconds_bucket:sum_rate # expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route) # - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate # expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container) minio: enabled: false mode: standalone rootUser: cecf rootPassword: cecf@cestong.com buckets: # Default Tempo storage bucket. - name: tempo-traces policy: none purge: false persistence: size: 20Gi resources: requests: cpu: 100m memory: 128Mi # Changed the mc config path to '/tmp' from '/etc' as '/etc' is only writable by root and OpenShift will not permit this. 
configPathmc: '/tmp/minio/mc/' # Configuration for the gateway gateway: # -- Specifies whether the gateway should be enabled enabled: false # -- Number of replicas for the gateway replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld autoscaling: # -- Enable autoscaling for the gateway enabled: false # -- Minimum autoscaling replicas for the gateway minReplicas: 1 # -- Maximum autoscaling replicas for the gateway maxReplicas: 3 # -- Autoscaling behavior configuration for the gateway behavior: {} # -- Target CPU utilisation percentage for the gateway targetCPUUtilizationPercentage: 60 # -- Target memory utilisation percentage for the gateway targetMemoryUtilizationPercentage: # -- Enable logging of 2xx and 3xx HTTP requests verboseLogging: true image: # -- The Docker registry for the gateway image. Overrides `global.image.registry` registry: null # -- Optional list of imagePullSecrets. Overrides `global.image.pullSecrets` pullSecrets: [] # -- The gateway image repository repository: nginxinc/nginx-unprivileged # -- The gateway image tag tag: 1.19-alpine # -- The gateway image pull policy pullPolicy: IfNotPresent # -- The name of the PriorityClass for gateway pods priorityClassName: null # -- Labels for gateway pods podLabels: {} # -- Annotations for gateway pods podAnnotations: {} # -- Additional CLI args for the gateway extraArgs: [] # -- Environment variables to add to the gateway pods extraEnv: [] # -- Environment variables from secrets or configmaps to add to the gateway pods extraEnvFrom: [] # -- Volumes to add to the gateway pods extraVolumes: [] # -- Volume mounts to add to the gateway pods extraVolumeMounts: [] # -- Resource requests and limits for the gateway resources: {} # -- Grace period to allow the gateway to shutdown before it is killed terminationGracePeriodSeconds: 30 # -- topologySpread for gateway pods. 
Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "gateway") | nindent 6 }} # -- Affinity for gateway pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "gateway") | nindent 10 }} topologyKey: kubernetes.io/hostname preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "gateway") | nindent 12 }} topologyKey: topology.kubernetes.io/zone # -- Node selector for gateway pods nodeSelector: {} # -- Tolerations for gateway pods tolerations: [] # Gateway service configuration service: # -- Port of the gateway service port: 80 # -- Type of the gateway service type: ClusterIP # -- ClusterIP of the gateway service clusterIP: null # -- Node port if service type is NodePort nodePort: null # -- Load balancer IPO address if service type is LoadBalancer loadBalancerIP: null # -- Annotations for the gateway service annotations: {} # -- Labels for gateway service labels: {} # -- Additional ports to be opneed on gateway service (e.g. for RPC connections) additionalPorts: {} # Gateway ingress configuration ingress: # -- Specifies whether an ingress for the gateway should be created enabled: false # -- Ingress Class Name. 
MAY be required for Kubernetes versions >= 1.18 # ingressClassName: nginx # -- Annotations for the gateway ingress annotations: {} # -- Hosts configuration for the gateway ingress hosts: - host: gateway.tempo.example.com paths: - path: / # -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.) might also be required by some Ingress Controllers # pathType: Prefix # -- TLS configuration for the gateway ingress tls: - secretName: tempo-gateway-tls hosts: - gateway.tempo.example.com # Basic auth configuration basicAuth: # -- Enables basic authentication for the gateway enabled: false # -- The basic auth username for the gateway username: null # -- The basic auth password for the gateway password: null # -- Uses the specified username and password to compute a htpasswd using Sprig's `htpasswd` function. # The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes # high CPU load. htpasswd: >- {{ htpasswd (required "'gateway.basicAuth.username' is required" .Values.gateway.basicAuth.username) (required "'gateway.basicAuth.password' is required" .Values.gateway.basicAuth.password) }} # -- Existing basic auth secret to use. Must contain '.htpasswd' existingSecret: null # Configures the readiness probe for the gateway readinessProbe: httpGet: path: / port: http-metrics initialDelaySeconds: 15 timeoutSeconds: 1 nginxConfig: # -- NGINX log format logFormat: |- main '$remote_addr - $remote_user [$time_local] $status ' '"$request" $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; # -- Allows appending custom configuration to the server block serverSnippet: '' # -- Allows appending custom configuration to the http block httpSnippet: '' # -- Allows overriding the DNS resolver address nginx will use resolver: '' # -- Config file contents for Nginx. 
Passed through the `tpl` function to allow templating # @default -- See values.yaml file: | worker_processes 5; ## Default: 1 error_log /dev/stderr; pid /tmp/nginx.pid; worker_rlimit_nofile 8192; events { worker_connections 4096; ## Default: 1024 } http { client_body_temp_path /tmp/client_temp; proxy_temp_path /tmp/proxy_temp_path; fastcgi_temp_path /tmp/fastcgi_temp; uwsgi_temp_path /tmp/uwsgi_temp; scgi_temp_path /tmp/scgi_temp; proxy_http_version 1.1; default_type application/octet-stream; log_format {{ .Values.gateway.nginxConfig.logFormat }} {{- if .Values.gateway.verboseLogging }} access_log /dev/stderr main; {{- else }} map $status $loggable { ~^[23] 0; default 1; } access_log /dev/stderr main if=$loggable; {{- end }} sendfile on; tcp_nopush on; {{- if .Values.gateway.nginxConfig.resolver }} resolver {{ .Values.gateway.nginxConfig.resolver }}; {{- else }} resolver {{ .Values.global.dnsService }}.{{ .Values.global.dnsNamespace }}.svc.{{ .Values.global.clusterDomain }}; {{- end }} {{- with .Values.gateway.nginxConfig.httpSnippet }} {{ . | nindent 2 }} {{- end }} server { listen 8080; {{- if .Values.gateway.basicAuth.enabled }} auth_basic "Tempo"; auth_basic_user_file /etc/nginx/secrets/.htpasswd; {{- end }} location = / { return 200 'OK'; auth_basic off; } location = /jaeger/api/traces { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "distributor") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:14268/api/traces; } location = /zipkin/spans { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "distributor") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:9411/spans; } location = /v1/traces { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "distributor") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:4318/v1/traces; } location ^~ /api { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . 
"component" "query-frontend") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; } location = /flush { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "ingester") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; } location = /shutdown { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "ingester") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; } location = /distributor/ring { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "distributor") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; } location = /ingester/ring { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "distributor") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; } location = /compactor/ring { proxy_pass http://{{ include "tempo.resourceName" (dict "ctx" . "component" "compactor") }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; } {{- with .Values.gateway.nginxConfig.serverSnippet }} {{ . | nindent 4 }} {{- end }} } } ############################################################################## # The values in and after the `enterprise:` key configure the enterprise features enterprise: # Enable enterprise features. License must be provided, nginx gateway is not installed, instead # the enterprise gateway is used. enabled: false image: # -- Grafana Enterprise Metrics container image repository. Note: for Grafana Tempo use the value 'image.repository' repository: grafana/enterprise-traces # -- Grafana Enterprise Metrics container image tag. 
Note: for Grafana Tempo use the value 'image.tag' tag: v2.3.1 # Note: pullPolicy and optional pullSecrets are set in toplevel 'image' section, not here # In order to use Grafana Enterprise Traces features, you will need to provide the contents of your Grafana Enterprise Traces # license, either by providing the contents of the license.jwt, or the name Kubernetes Secret that contains your license.jwt. # To set the license contents, use the flag `--set-file 'license.contents=./license.jwt'` # To use your own Kubernetes Secret, `--set license.external=true`. license: contents: 'NOTAVALIDLICENSE' external: false secretName: '{{ include "tempo.resourceName" (dict "ctx" . "component" "license") }}' # Settings for the initial admin(istrator) token generator job. Can only be enabled if # enterprise.enabled is true - requires license. tokengenJob: enable: true # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld extraArgs: {} env: [] extraEnvFrom: [] annotations: {} initContainers: [] # -- The SecurityContext for tokenjobgen containers containerSecurityContext: readOnlyRootFilesystem: true # Settings for the admin_api service providing authentication and authorization service. # Can only be enabled if enterprise.enabled is true - requires license. adminApi: replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld annotations: {} service: annotations: {} labels: {} initContainers: [] strategy: type: RollingUpdate rollingUpdate: maxSurge: 0 maxUnavailable: 1 podLabels: {} podAnnotations: {} nodeSelector: {} # -- topologySpread for admin-api pods. Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . 
"component" "admin-api") | nindent 6 }} # -- Affinity for admin-api pods. Passed through `tpl` and, thus, to be configured as string # @default -- Soft node and soft zone anti-affinity affinity: | podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "admin-api") | nindent 12 }} topologyKey: kubernetes.io/hostname - weight: 75 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "admin-api") | nindent 12 }} topologyKey: topology.kubernetes.io/zone # Pod Disruption Budget podDisruptionBudget: {} securityContext: {} # -- The SecurityContext for admin_api containers containerSecurityContext: readOnlyRootFilesystem: true extraArgs: {} persistence: subPath: readinessProbe: httpGet: path: /ready port: http-metrics initialDelaySeconds: 45 resources: requests: cpu: 10m memory: 32Mi terminationGracePeriodSeconds: 60 tolerations: [] extraContainers: [] extraVolumes: [] extraVolumeMounts: [] env: [] extraEnvFrom: [] # Settings for the gateway service providing authentication and authorization via the admin_api. # Can only be enabled if enterprise.enabled is true - requires license. enterpriseGateway: # -- If you want to use your own proxy URLs, set this to false. useDefaultProxyURLs: true # -- Proxy URLs defined in this object will be used if useDefaultProxyURLs is set to false. proxy: {} replicas: 1 # -- hostAliases to add hostAliases: [] # - ip: 1.2.3.4 # hostnames: # - domain.tld annotations: {} service: annotations: {} labels: {} # -- If the port is left undefined, the service will listen on the same port as the pod port: null strategy: type: RollingUpdate rollingUpdate: maxSurge: 0 maxUnavailable: 1 podLabels: {} podAnnotations: {} # Pod Disruption Budget podDisruptionBudget: {} nodeSelector: {} # -- topologySpread for enterprise-gateway pods. 
Passed through `tpl` and, thus, to be configured as string # @default -- Defaults to allow skew no more then 1 node per AZ topologySpreadConstraints: | - maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "enterprise-gateway") | nindent 6 }} # -- Affinity for enterprise-gateway pods. Passed through `tpl` and, thus, to be configured as string # @default -- Soft node and soft zone anti-affinity affinity: | podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "enterprise-gateway") | nindent 12 }} topologyKey: kubernetes.io/hostname - weight: 75 podAffinityTerm: labelSelector: matchLabels: {{- include "tempo.selectorLabels" (dict "ctx" . "component" "enterprise-gateway") | nindent 12 }} topologyKey: topology.kubernetes.io/zone securityContext: {} # -- The SecurityContext for gateway containers containerSecurityContext: readOnlyRootFilesystem: true initContainers: [] extraArgs: {} persistence: subPath: readinessProbe: httpGet: path: /ready port: http-metrics initialDelaySeconds: 45 resources: requests: cpu: 10m memory: 32Mi terminationGracePeriodSeconds: 60 tolerations: [] extraContainers: [] extraVolumes: [] extraVolumeMounts: [] env: [] extraEnvFrom: [] # Ingress configuration ingress: # -- Specifies whether an ingress for the gateway should be created enabled: false # -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 # ingressClassName: gateway # -- Annotations for the gateway ingress annotations: {} # -- Hosts configuration for the gateway ingress hosts: - host: gateway.gem.example.com paths: - path: / # -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.) 
might also be required by some Ingress Controllers # pathType: Prefix # -- TLS configuration for the gateway ingress tls: - secretName: gem-gateway-tls hosts: - gateway.gem.example.com # -- Create extra manifests via values. extraObjects: [] # - apiVersion: "kubernetes-client.io/v1" # kind: ExternalSecret # metadata: # name: tempo-secrets # spec: # backendType: aws # data: # - key: secret-access-key # name: awssm-secret