# Default values for opentelemetry-collector.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

nameOverride: ""
fullnameOverride: ""

# Valid values are "daemonset", "deployment", and "statefulset".
mode: "daemonset"

# Handles basic configuration of components that
# also require k8s modifications to work correctly.
# .Values.config can be used to modify/add to a preset
# component configuration, but CANNOT be used to remove
# preset configuration. If you require removal of any
# sections of a preset configuration, you cannot use
# the preset. Instead, configure the component manually in
# .Values.config and use the other fields supplied in the
# values.yaml to configure k8s as necessary.
presets:
  # Configures the collector to collect logs.
  # Adds the filelog receiver to the logs pipeline
  # and adds the necessary volumes and volume mounts.
  # Best used with mode = daemonset.
  logsCollection:
    enabled: false
    includeCollectorLogs: false
    # Enabling this writes checkpoints in /var/lib/otelcol/ host directory.
    # Note this changes collector's user to root, so that it can write to host directory.
    storeCheckpoints: false
  # Configures the collector to collect host metrics.
  # Adds the hostmetrics receiver to the metrics pipeline
  # and adds the necessary volumes and volume mounts.
  # Best used with mode = daemonset.
  hostMetrics:
    enabled: false
  # Configures the Kubernetes Processor to add Kubernetes metadata.
  # Adds the k8sattributes processor to all the pipelines
  # and adds the necessary rules to ClusterRole.
  # Best used with mode = daemonset.
  kubernetesAttributes:
    enabled: false
  # Configures the Kubernetes Cluster Receiver to collect cluster-level metrics.
  # Adds the k8s_cluster receiver to the metrics pipeline
  # and adds the necessary rules to ClusterRole.
  # Best used with mode = deployment or statefulset.
  clusterMetrics:
    enabled: false
  # Configures the collector to collect Kubelet metrics.
  # Adds the kubeletstats receiver to the metrics pipeline
  # and adds the necessary rules to ClusterRole.
  # Best used with mode = daemonset.
  kubeletMetrics:
    enabled: false

configMap:
  # Specifies whether a configMap should be created (true by default)
  create: true

# Base collector configuration.
# Supports templating. To escape existing instances of {{ }}, use {{` `}}.
# For example, {{ REDACTED_EMAIL }} becomes {{` {{ REDACTED_EMAIL }} `}}.
config:
  exporters:
    otlphttp/uptrace:
      # endpoint: http://39.106.74.84:34318
      endpoint: http://uptrace.observe.svc.cluster.local:14318
      tls:
        insecure: true
      headers:
        # uptrace-dsn: 'http://project2_secret_token@39.106.74.84:34318/2'
        uptrace-dsn: 'http://project2_secret_token@uptrace.observe.svc.cluster.local:14318/2'
    logging: {}
    otlp:
      endpoint: "tempo.observe.svc.cluster.local:4317"
      tls:
        insecure: true
    prometheus:
      endpoint: "0.0.0.0:8889"
      # namespace: default
    clickhouse:
      endpoint: "tcp://clickhouse-headless.observe.svc.cluster.local:9000?dial_timeout=10s&compress=lz4"
      database: otel
      username: default
      # NOTE(review): plaintext credential committed to values.yaml — prefer referencing
      # a Kubernetes Secret (e.g. via extraEnvs + ${env:...} expansion) instead.
      password: "cecf@cestong.com"
      ttl_days: 10
      # logs_table: otel_logs
      # traces_table: otel_traces
      # metrics_table: otel_metrics
      timeout: 5s
      retry_on_failure:
        enabled: true
        initial_interval: 5s
        max_interval: 30s
        max_elapsed_time: 300s
    kafka:
      brokers:
        - kafka-headless.observe.svc.cluster.local:9092
        # - kafka-0.kafka-headless.observe.svc.cluster.local:9092
      # Quoted so the version can never be re-typed by tooling (three-part
      # versions parse as strings, but two-part ones like 2.0 would not).
      protocol_version: "2.0.0"
      encoding: otlp_json
      topic: otel
    # kafka:
    #   brokers:
    #     - kafka-0.kafka-svc.kafka.svc.cluster.local:9092
    #   topic: trace2
    #   encoding: "json"
  extensions:
    # The health_check extension is mandatory for this chart.
    # Without the health_check extension the collector will fail the readiness and liveness probes.
    # The health_check extension can be modified, but should never be removed.
    health_check: {}
    memory_ballast: {}
  processors:
    k8sattributes:
      extract:
        annotations:
          - from: pod
            key: workload
            tag_name: k8s.annotations.workload
        labels:
          - from: pod
            key: app
            tag_name: k8s.labels.app
        metadata:
          - k8s.pod.name
          - k8s.pod.start_time
          - k8s.pod.uid
          - k8s.namespace.name
          - k8s.node.name
          - container.image.name
          - container.image.tag
      pod_association:
        - sources:
            - from: connection
        - sources:
            - from: resource_attribute
              name: k8s.pod.ip
        - sources:
            - from: resource_attribute
              name: k8s.pod.uid
        - sources:
            - from: resource_attribute
              name: container.id
    batch:
      send_batch_max_size: 100
      send_batch_size: 100
      timeout: 10s
    # If set to null, will be overridden with values based on k8s resource limits
    memory_limiter: null
  receivers:
    jaeger:
      protocols:
        grpc:
          endpoint: ${MY_POD_IP}:14250
        thrift_http:
          endpoint: ${MY_POD_IP}:14268
        thrift_compact:
          endpoint: ${MY_POD_IP}:6831
    otlp:
      protocols:
        grpc:
          endpoint: ${MY_POD_IP}:4317
        http:
          endpoint: ${MY_POD_IP}:4318
    prometheus:
      config:
        scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
              - targets:
                  - ${MY_POD_IP}:8888
    zipkin:
      endpoint: ${MY_POD_IP}:9411
  service:
    telemetry:
      metrics:
        address: ${MY_POD_IP}:8888
      logs:
        level: INFO
    extensions:
      - health_check
      - memory_ballast
    pipelines:
      logs:
        exporters:
          - clickhouse
          - otlphttp/uptrace
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
      metrics:
        exporters:
          - clickhouse
          - prometheus
          - otlphttp/uptrace
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
          - prometheus
      traces:
        exporters:
          - otlp
          - clickhouse
          - otlphttp/uptrace
          - kafka
        processors:
          - memory_limiter
          - batch
          - k8sattributes
        receivers:
          - otlp

image:
  # If you want to use the core image `otel/opentelemetry-collector`, you also need to change `command.name` value to `otelcol`.
  # repository: otel/opentelemetry-collector-contrib
  repository: reg.cestong.com.cn/cecf/opentelemetry-collector-contrib
  pullPolicy: Always
  # Overrides the image tag whose default is the chart appVersion.
  tag: "latest"
  # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value).
  digest: ""
imagePullSecrets: []

# OpenTelemetry Collector executable
command:
  name: otelcontribcol
  extraArgs: []

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

clusterRole:
  # Specifies whether a clusterRole should be created
  # Some presets also trigger the creation of a cluster role and cluster role binding.
  # If using one of those presets, this field is a no-op.
  create: false
  # Annotations to add to the clusterRole
  # Can be used in combination with presets that create a cluster role.
  annotations: {}
  # The name of the clusterRole to use.
  # If not set a name is generated using the fullname template
  # Can be used in combination with presets that create a cluster role.
  name: ""
  # A set of rules as documented here : https://kubernetes.io/docs/reference/access-authn-authz/rbac/
  # Can be used in combination with presets that create a cluster role to add additional rules.
  rules: []
  # - apiGroups:
  #   - ''
  #   resources:
  #   - 'pods'
  #   - 'nodes'
  #   verbs:
  #   - 'get'
  #   - 'list'
  #   - 'watch'

  clusterRoleBinding:
    # Annotations to add to the clusterRoleBinding
    # Can be used in combination with presets that create a cluster role binding.
    annotations: {}
    # The name of the clusterRoleBinding to use.
    # If not set a name is generated using the fullname template
    # Can be used in combination with presets that create a cluster role binding.
    name: ""

podSecurityContext: {}
securityContext: {}

nodeSelector: {}
tolerations: []
affinity: {}
# List of topology spread constraints (rendered into the pod spec),
# so the empty value must be a sequence, not a mapping.
topologySpreadConstraints: []

# Allows for pod scheduler prioritisation
priorityClassName: ""

extraEnvs: []
extraVolumes: []
extraVolumeMounts: []

# Configuration for ports
# nodePort is also allowed
ports:
  otlp:
    enabled: true
    containerPort: 4317
    servicePort: 4317
    # hostPort: 4317
    protocol: TCP
    nodePort: 30317
    appProtocol: grpc
  otlp-http:
    enabled: true
    containerPort: 4318
    servicePort: 4318
    # hostPort: 4318
    # nodePort: 30318
    protocol: TCP
  jaeger-compact:
    enabled: true
    containerPort: 6831
    servicePort: 6831
    # hostPort: 6831
    protocol: UDP
  jaeger-thrift:
    enabled: true
    containerPort: 14268
    servicePort: 14268
    # hostPort: 14268
    protocol: TCP
  jaeger-grpc:
    enabled: true
    containerPort: 14250
    servicePort: 14250
    # hostPort: 14250
    protocol: TCP
  prome:
    enabled: true
    containerPort: 8889
    servicePort: 8889
    protocol: TCP
  zipkin:
    enabled: true
    containerPort: 9411
    servicePort: 9411
    # hostPort: 9411
    protocol: TCP
  metrics:
    # The metrics port is disabled by default. However you need to enable the port
    # in order to use the ServiceMonitor (serviceMonitor.enabled) or PodMonitor (podMonitor.enabled).
    enabled: true
    containerPort: 8888
    servicePort: 8888
    protocol: TCP

# Resource limits & requests. Update according to your own use case as these values might be too low for a typical deployment.
resources:
  limits:
    cpu: 256m
    memory: 512Mi

podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/path: /metrics
  prometheus.io/port: "8889"

podLabels: {}

# Host networking requested for this pod. Use the host's network namespace.
hostNetwork: false

# Pod DNS policy: ClusterFirst, ClusterFirstWithHostNet, Default, or None
dnsPolicy: ""

# only used with deployment mode
replicaCount: 1

# only used with deployment mode
revisionHistoryLimit: 10

annotations: {}

# List of extra sidecars to add
extraContainers: []
# extraContainers:
#   - name: test
#     command:
#       - cp
#     args:
#       - /bin/sleep
#       - /test/sleep
#     image: busybox:latest
#     volumeMounts:
#       - name: test
#         mountPath: /test

# List of init container specs, e.g. for copying a binary to be executed as a lifecycle hook.
# Another usage of init containers is e.g. initializing filesystem permissions to the OTLP Collector user `10001` in case you are using persistence and the volume is producing a permission denied error for the OTLP Collector container.
initContainers: []
# initContainers:
#   - name: test
#     image: busybox:latest
#     command:
#       - cp
#     args:
#       - /bin/sleep
#       - /test/sleep
#     volumeMounts:
#       - name: test
#         mountPath: /test
#   - name: init-fs
#     image: busybox:latest
#     command:
#       - sh
#       - '-c'
#       - 'chown -R 10001: /var/lib/storage/otc'  # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`
#     volumeMounts:
#       - name: opentelemetry-collector-data  # use the name of the volume used for persistence
#         mountPath: /var/lib/storage/otc  # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`

# Pod lifecycle policies.
lifecycleHooks: {}
# lifecycleHooks:
#   preStop:
#     exec:
#       command:
#         - /test/sleep
#         - "5"

service:
  # type: ClusterIP
  type: NodePort
  # type: LoadBalancer
  # loadBalancerIP: 1.2.3.4
  # loadBalancerSourceRanges: []
  annotations: {}

ingress:
  enabled: true
  annotations:
    nginx.ingress.kubernetes.io/cors-allow-origin: "http://otel-demo.cestong.com.cn,http://localhost:3000,http://localhost:1234"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/cors-allow-headers: "*"
    nginx.ingress.kubernetes.io/cors-allow-methods: "*"
    nginx.ingress.kubernetes.io/cors-allow-credentials: "true"
  ingressClassName: nginx
  hosts:
    - host: otel-collector.cestong.com.cn
      paths:
        - path: /
          pathType: Prefix
          port: 4318
  # tls:
  #   - secretName: collector-tls
  #     hosts:
  #       - collector.example.com

  # Additional ingresses - only created if ingress.enabled is true
  # Useful for when differently annotated ingress services are required
  # Each additional ingress needs key "name" set to something unique
  additionalIngresses: []
  # - name: cloudwatch
  #   ingressClassName: nginx
  #   annotations: {}
  #   hosts:
  #     - host: collector.example.com
  #       paths:
  #         - path: /
  #           pathType: Prefix
  #           port: 4318
  #   tls:
  #     - secretName: collector-tls
  #       hosts:
  #         - collector.example.com

podMonitor:
  # The pod monitor by default scrapes the metrics port.
  # The metrics port needs to be enabled as well.
  enabled: false
  metricsEndpoints:
    - port: metrics
      # interval: 15s
  # additional labels for the PodMonitor
  extraLabels: {}
  #   release: kube-prometheus-stack

serviceMonitor:
  # The service monitor by default scrapes the metrics port.
  # The metrics port needs to be enabled as well.
  enabled: false
  metricsEndpoints:
    - port: metrics
      # interval: 15s
  # additional labels for the ServiceMonitor
  extraLabels: {}
  #   release: kube-prometheus-stack

# PodDisruptionBudget is used only if deployment enabled
podDisruptionBudget:
  enabled: false
  # minAvailable: 2
  # maxUnavailable: 1

# autoscaling is used only if deployment enabled
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

rollout:
  rollingUpdate: {}
  # When 'mode: daemonset', maxSurge cannot be used when hostPort is set for any of the ports
  # maxSurge: 25%
  # maxUnavailable: 0
  strategy: RollingUpdate

prometheusRule:
  enabled: false
  groups: []
  # Create default rules for monitoring the collector
  defaultRules:
    enabled: false
  # additional labels for the PrometheusRule
  extraLabels: {}

statefulset:
  # volumeClaimTemplates for a statefulset
  volumeClaimTemplates: []
  podManagementPolicy: "Parallel"