# Default values for opentelemetry-collector.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
nameOverride: ""
fullnameOverride: ""

# Valid values are "daemonset", "deployment", and "statefulset".
mode: "daemonset"

presets:
  logsCollection:
    enabled: false
    includeCollectorLogs: false
    # Enabling this writes checkpoints in /var/lib/otelcol/ host directory.
    # Note this changes collector's user to root, so that it can write to host directory.
    storeCheckpoints: false
  # Configures the collector to collect host metrics.
  # Adds the hostmetrics receiver to the metrics pipeline
  # and adds the necessary volumes and volume mounts.
  # Best used with mode = daemonset.
  hostMetrics:
    enabled: false
  # Configures the Kubernetes Processor to add Kubernetes metadata.
  # Adds the k8sattributes processor to all the pipelines
  # and adds the necessary rules to ClusterRole.
  # Best used with mode = daemonset.
  kubernetesAttributes:
    enabled: true
  # Configures the Kubernetes Cluster Receiver to collect cluster-level metrics.
  # Adds the k8s_cluster receiver to the metrics pipeline
  # and adds the necessary rules to ClusterRole.
  # Best used with mode = deployment or statefulset.
  clusterMetrics:
    enabled: false
  # Configures the collector to collect Kubelet metrics.
  # Adds the kubeletstats receiver to the metrics pipeline
  # and adds the necessary rules to ClusterRole.
  # Best used with mode = daemonset.
  kubeletMetrics:
    enabled: false

configMap:
  # Specifies whether a configMap should be created (true by default)
  create: true

# Base collector configuration.
# Supports templating. To escape existing instances of {{ }}, use {{` `}}.
# For example, {{ REDACTED_EMAIL }} becomes {{` {{ REDACTED_EMAIL }} `}}.
config:
  exporters:
    loki:
      endpoint: http://loki-loki-distributed-gateway.observe.svc.cluster.local:80/loki/api/v1/push
    otlphttp/deepflow:
      traces_endpoint: "http://deepflow-agent.deepflow/api/v1/otel/trace"
      tls:
        insecure: true
      retry_on_failure:
        enabled: true
    otlphttp/uptrace:
      # endpoint: http://39.106.74.84:34318
      endpoint: http://uptrace.observe.svc.cluster.local:14318
      tls:
        insecure: true
      headers:
        # uptrace-dsn: 'http://project2_secret_token@39.106.74.84:34318/2'
        uptrace-dsn: 'http://project2_secret_token@uptrace.observe.svc.cluster.local:14318/2'
    debug:
      verbosity: detailed
    otlp:
      endpoint: "tempo-distributor.observe.svc.cluster.local:4317"
      tls:
        insecure: true
    prometheus:
      endpoint: "0.0.0.0:8889"
    clickhouse:
      endpoint: "tcp://clickhouse-headless.observe.svc.cluster.local:9000?dial_timeout=10s&compress=lz4"
      database: otel
      username: default
      # NOTE(review): plaintext credential committed to values — prefer injecting
      # via a Kubernetes Secret / extraEnvs rather than storing it here.
      password: "cecf@cestong.com"
      ttl: 240h
      # logs_table: otel_logs
      # traces_table: otel_traces
      # metrics_table: otel_metrics
      timeout: 5s
      retry_on_failure:
        enabled: true
        initial_interval: 5s
        max_interval: 30s
        max_elapsed_time: 300s
    kafka/proto:
      brokers:
        - kafka-headless.observe.svc.cluster.local:9092
      protocol_version: 2.0.0
      encoding: otlp_proto
      topic: otelproto
      timeout: 5s
      retry_on_failure:
        enabled: true
        initial_interval: 5s
        max_interval: 30s
        max_elapsed_time: 120s
      producer:
        max_message_bytes: 1000000
        required_acks: 1
        compression: lz4
      sending_queue:
        enabled: false
    kafka:
      brokers:
        - kafka-headless.observe.svc.cluster.local:9092
        # - kafka-broker.ob.svc.cluster.local:9092
      protocol_version: 2.0.0
      encoding: otlp_json
      topic: otel
      timeout: 5s
      retry_on_failure:
        enabled: true
        initial_interval: 5s
        max_interval: 30s
        max_elapsed_time: 120s
      producer:
        max_message_bytes: 1000000
        required_acks: 1
        compression: lz4
        flush_max_messages: 5000
      sending_queue:
        enabled: false
  extensions:
    health_check:
      endpoint: "0.0.0.0:13133"
  processors:
    filter/ottl:
      error_mode: ignore
      logs:
        log_record:
          - 'severity_number < SEVERITY_NUMBER_WARN'
    # transform:
    #   error_mode: ignore
    #   log_statements:
    #     - context: log
    #       statements:
    #         - set(severity_number, SEVERITY_NUMBER_INFO) where IsString(body) and IsMatch(body, "(\\s|\\[)INFO(\\s|\\])")
    #         - set(severity_number, SEVERITY_NUMBER_WARN) where IsString(body) and IsMatch(body, "(\\s|\\[)WARN(\\s|\\])")
    #         - set(severity_number, SEVERITY_NUMBER_ERROR) where IsString(body) and IsMatch(body, "(\\s|\\[)ERROR(\\s|\\])")
    #         - set(severity_number, SEVERITY_NUMBER_INFO) where severity_number == SEVERITY_NUMBER_UNSPECIFIED
    attributes:
      actions:
        - action: insert
          key: loki.attribute.labels
          value: log.iostream
    resource:
      attributes:
        - action: insert
          key: loki.resource.labels
          value: k8s.container.name, k8s.container.restart_count, k8s.deployment.name, k8s.namespace.name, k8s.pod.name, k8s.node.name, k8s.daemonset.name, service.name, k8s.pod.uid
    batch:
      send_batch_max_size: 1000
      send_batch_size: 500
      timeout: 5s
    # If set to null, will be overridden with values based on k8s resource limits
    memory_limiter:
      check_interval: 1s
      limit_percentage: 80
      spike_limit_percentage: 30
  receivers:
    kafka:
      # brokers is a list in the collector's kafka receiver config; use the
      # sequence form for consistency with the kafka exporters above.
      brokers:
        - kafka-headless.observe.svc.cluster.local:9092
      protocol_version: 2.0.0
      topic: otel_agent
      encoding: otlp_proto
    otlp:
      protocols:
        grpc:
          endpoint: ${MY_POD_IP}:4317
        http:
          endpoint: ${MY_POD_IP}:4318
    prometheus:
      config:
        scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
              - targets:
                  - ${MY_POD_IP}:8888
  service:
    telemetry:
      metrics:
        address: ${MY_POD_IP}:8888
      logs:
        level: "info"
    extensions:
      - health_check
    pipelines:
      metrics:
        receivers:
          - otlp
          - prometheus
        processors:
          - memory_limiter
        exporters:
          - prometheus
          # - clickhouse
      logs:
        exporters:
          - clickhouse
        processors:
          # - filter/ottl
          - memory_limiter
          - batch
        receivers:
          - otlp
      traces:
        exporters:
          - kafka/proto
        processors:
          - memory_limiter
          # - batch
        receivers:
          - otlp
          - kafka

image:
  repository: otel/opentelemetry-collector-contrib
  # repository: reg.cestong.com.cn/cecf/obcol
  pullPolicy: Always
  tag: "0.108.0"
  digest: ""
imagePullSecrets: []
# OpenTelemetry Collector executable
command:
  name: ""
  extraArgs: []

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

clusterRole:
  # Specifies whether a clusterRole should be created
  # Some presets also trigger the creation of a cluster role and cluster role binding.
  # If using one of those presets, this field is no-op.
  create: false
  # Annotations to add to the clusterRole
  # Can be used in combination with presets that create a cluster role.
  annotations: {}
  # The name of the clusterRole to use.
  # If not set a name is generated using the fullname template
  # Can be used in combination with presets that create a cluster role.
  name: ""
  # A set of rules as documented here : https://kubernetes.io/docs/reference/access-authn-authz/rbac/
  # Can be used in combination with presets that create a cluster role to add additional rules.
  rules: []
  # - apiGroups:
  #     - ''
  #   resources:
  #     - 'pods'
  #     - 'nodes'
  #   verbs:
  #     - 'get'
  #     - 'list'
  #     - 'watch'

  clusterRoleBinding:
    annotations: {}
    name: ""

podSecurityContext: {}
securityContext: {}

nodeSelector: {}
tolerations: []
affinity: {}
# Fixed: this field renders into the pod spec's topologySpreadConstraints,
# which is a list — an empty mapping ({}) is the wrong type; use [].
topologySpreadConstraints: []

priorityClassName: ""

extraEnvs: []
extraVolumes: []
extraVolumeMounts: []

ports:
  health:
    enabled: true
    containerPort: 13133
    servicePort: 13133
    protocol: TCP
  otlp:
    enabled: true
    containerPort: 4317
    servicePort: 4317
    # hostPort: 4317
    protocol: TCP
    nodePort: 30317
    appProtocol: grpc
  otlp-http:
    enabled: true
    containerPort: 4318
    servicePort: 4318
    # hostPort: 4318
    # nodePort: 30318
    protocol: TCP
  prome:
    enabled: true
    containerPort: 8889
    servicePort: 8889
    protocol: TCP
  metrics:
    enabled: true
    containerPort: 8888
    servicePort: 8888
    protocol: TCP

# Resource limits & requests. Update according to your own use case as these values might be too low for a typical deployment.
resources:
  limits:
    cpu: 4
    memory: 4096Mi

podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/path: /metrics
  prometheus.io/port: "8889"

podLabels: {}

# Host networking requested for this pod. Use the host's network namespace.
hostNetwork: false

# Pod DNS policy: ClusterFirst, ClusterFirstWithHostNet, None, Default
dnsPolicy: ""

# only used with deployment mode
replicaCount: 1

# only used with deployment mode
revisionHistoryLimit: 10

annotations: {}

# List of extra sidecars to add
extraContainers: []
# extraContainers:
#   - name: test
#     command:
#       - cp
#     args:
#       - /bin/sleep
#       - /test/sleep
#     image: busybox:latest
#     volumeMounts:
#       - name: test
#         mountPath: /test

# List of init container specs, e.g. for copying a binary to be executed as a lifecycle hook.
# Another usage of init containers is e.g. initializing filesystem permissions to the OTLP Collector user `10001`
# in case you are using persistence and the volume is producing a permission denied error
# for the OTLP Collector container.
initContainers: []

lifecycleHooks: {}

service:
  type: NodePort
  annotations: {}

ingress:
  enabled: true
  annotations:
    nginx.ingress.kubernetes.io/cors-allow-origin: "http://otel-demo.cestong.com.cn,http://localhost:3000,http://localhost:1234,http://localhost:5678,http://observe-front.cestong.com.cn"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/cors-allow-headers: "*"
    nginx.ingress.kubernetes.io/cors-allow-methods: "*"
    nginx.ingress.kubernetes.io/cors-allow-credentials: "true"
  ingressClassName: nginx
  hosts:
    - host: otel-collector.cestong.com.cn
      paths:
        - path: /
          pathType: Prefix
          port: 4318
  additionalIngresses: []

podMonitor:
  enabled: false
  metricsEndpoints:
    - port: metrics
    #   interval: 15s
  # additional labels for the PodMonitor
  extraLabels: {}
  #   release: kube-prometheus-stack

serviceMonitor:
  # The service monitor by default scrapes the metrics port.
  # The metrics port needs to be enabled as well.
  enabled: false
  metricsEndpoints:
    - port: metrics
    #   interval: 15s
  # additional labels for the ServiceMonitor
  extraLabels: {}
  #   release: kube-prometheus-stack

# PodDisruptionBudget is used only if deployment enabled
podDisruptionBudget:
  enabled: false
  # minAvailable: 2
  # maxUnavailable: 1

# autoscaling is used only if deployment enabled
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

rollout:
  rollingUpdate: {}
  # When 'mode: daemonset', maxSurge cannot be used when hostPort is set for any of the ports
  #   maxSurge: 25%
  #   maxUnavailable: 0
  strategy: RollingUpdate

prometheusRule:
  enabled: false
  groups: []
  # Create default rules for monitoring the collector
  defaultRules:
    enabled: false
  # additional labels for the PrometheusRule
  extraLabels: {}

statefulset:
  # volumeClaimTemplates for a statefulset
  volumeClaimTemplates: []
  podManagementPolicy: "Parallel"