- 9,12c9
- < mode: ""
- <
- < # Specify which namespace should be used to deploy the resources into
- < namespaceOverride: ""
- ---
- > mode: "daemonset"
- 28d24
- < # See https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver for details on the receiver.
- 35,37d30
- < # The maximum bytes size of the recombined field.
- < # Once the size exceeds the limit, all received entries of the source will be combined and flushed.
- < maxRecombineLogSize: 102400
- 42d34
- < # See https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver for details on the receiver.
- 49d40
- < # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor for details on the receiver.
- 51,71c42
- < enabled: false
- < # When enabled the processor will extra all labels for an associated pod and add them as resource attributes.
- < # The label's exact name will be the key.
- < extractAllPodLabels: false
- < # When enabled the processor will extra all annotations for an associated pod and add them as resource attributes.
- < # The annotation's exact name will be the key.
- < extractAllPodAnnotations: false
- < # Configures the collector to collect node, pod, and container metrics from the API server on a kubelet..
- < # Adds the kubeletstats receiver to the metrics pipeline
- < # and adds the necessary rules to ClusteRole.
- < # Best used with mode = daemonset.
- < # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver for details on the receiver.
- < kubeletMetrics:
- < enabled: false
- < # Configures the collector to collect kubernetes events.
- < # Adds the k8sobject receiver to the logs pipeline
- < # and collects kubernetes events by default.
- < # Best used with mode = deployment or statefulset.
- < # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver for details on the receiver.
- < kubernetesEvents:
- < enabled: false
- ---
- > enabled: true
- 76d46
- < # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver for details on the receiver.
- 78a49,54
- > # Configures the collector to collect Kubelet metrics.
- > # Adds the kubeletstats receiver to the metrics pipeline
- > # and adds the necessary rules to ClusteRole.
- > # Best used with mode = daemonset.
- > kubeletMetrics:
- > enabled: false
- 83,85d58
- < # Specifies an existing ConfigMap to be mounted to the pod
- < # The ConfigMap MUST include the collector configuration via a key named 'relay' or the collector will not start.
- < existingName: ""
- 92c65,152
- < debug: {}
- ---
- > loki:
- > endpoint: http://loki-loki-distributed-gateway.observe.svc.cluster.local:80/loki/api/v1/push
- > otlphttp/deepflow:
- > traces_endpoint: "http://deepflow-agent.deepflow/api/v1/otel/trace"
- > tls:
- > insecure: true
- > retry_on_failure:
- > enabled: true
- > otlphttp/uptrace:
- > #endpoint: http://39.106.74.84:34318
- > endpoint: http://uptrace.observe.svc.cluster.local:14318
- > tls:
- > insecure: true
- > headers:
- > # uptrace-dsn: 'http://project2_secret_token@39.106.74.84:34318/2'
- > uptrace-dsn: 'http://project2_secret_token@uptrace.observe.svc.cluster.local:14318/2'
- >
- > debug:
- > verbosity: detailed
- > otlp:
- > endpoint: "tempo-distributor.observe.svc.cluster.local:4317"
- > tls:
- > insecure: true
- > otlp/col2:
- > endpoint: "col-l1-collector.ob.svc.cluster.local:4317"
- > tls:
- > insecure: true
- > prometheus:
- > endpoint: "0.0.0.0:8889"
- > #namespace: default
- > clickhouse:
- > endpoint: "tcp://clickhouse-headless.observe.svc.cluster.local:9000?dial_timeout=10s&compress=lz4"
- > database: otel
- > username: default
- > password: "cecf@cestong.com"
- > ttl: 240h
- > #logs_table: otel_logs
- > #traces_table: otel_traces
- > #metrics_table: otel_metrics
- > timeout: 5s
- > retry_on_failure:
- > enabled: true
- > initial_interval: 5s
- > max_interval: 30s
- > max_elapsed_time: 300s
- > kafka:
- > brokers:
- > - kafka.observe.svc.cluster.local:9092
- > protocol_version: 2.0.0
- > encoding: otlp_proto
- > topic: otel
- > timeout: 5s
- > retry_on_failure:
- > enabled: true
- > initial_interval: 5s
- > max_interval: 30s
- > max_elapsed_time: 120s
- > producer:
- > max_message_bytes: 1000000
- > required_acks: 1
- > compression: lz4
- > sending_queue:
- > enabled: false
- > kafka/trace:
- > brokers:
- > - kafka.observe.svc.cluster.local:9092
- > protocol_version: 2.0.0
- > encoding: otlp_proto
- > topic: otel_trace
- > sending_queue:
- > enabled: false
- > kafka/metric:
- > brokers:
- > - kafka.observe.svc.cluster.local:9092
- > protocol_version: 2.0.0
- > encoding: otlp_proto
- > topic: otel_metric
- > kafka/log:
- > brokers:
- > - kafka.observe.svc.cluster.local:9092
- > protocol_version: 2.0.0
- > encoding: otlp_proto
- > topic: otel_log
- > #kafka:
- > #brokers:
- > #- kafka-0.kafka-svc.kafka.svc.cluster.local:9092
- > #topic: trace2
- > #encoding: "json"
- 97,99c157
- < health_check:
- < endpoint: ${env:MY_POD_IP}:13133
- < memory_ballast: {}
- ---
- > health_check: {}
- 101c159,187
- < batch: {}
- ---
- > filter/ottl:
- > error_mode: ignore
- > logs:
- > log_record:
- > - 'severity_number < SEVERITY_NUMBER_WARN'
- > #transform:
- > # error_mode: ignore
- > # log_statements:
- > # - context: log
- > # statements:
- > # - set(severity_number, SEVERITY_NUMBER_INFO) where IsString(body) and IsMatch(body, "(\\s|\\[)INFO(\\s|\\])")
- > # - set(severity_number, SEVERITY_NUMBER_WARN) where IsString(body) and IsMatch(body, "(\\s|\\[)WARN(\\s|\\])")
- > # - set(severity_number, SEVERITY_NUMBER_ERROR) where IsString(body) and IsMatch(body, "(\\s|\\[)ERROR(\\s|\\])")
- > # - set(severity_number, SEVERITY_NUMBER_INFO) where severity_number == SEVERITY_NUMBER_UNSPECIFIED
- > attributes:
- > actions:
- > - action: insert
- > key: loki.attribute.labels
- > value: log.iostream
- >
- > resource:
- > attributes:
- > - action: insert
- > key: loki.resource.labels
- > value: k8s.container.name, k8s.container.restart_count, k8s.deployment.name, k8s.namespace.name, k8s.pod.name, k8s.node.name, k8s.daemonset.name, service.name, k8s.pod.uid
- > batch:
- > send_batch_max_size: 1000
- > send_batch_size: 500
- > timeout: 5s
- 103c189,192
- < memory_limiter: null
- ---
- > memory_limiter:
- > check_interval: 1s
- > limit_percentage: 80
- > spike_limit_percentage: 30
- 105,112c194,198
- < jaeger:
- < protocols:
- < grpc:
- < endpoint: ${env:MY_POD_IP}:14250
- < thrift_http:
- < endpoint: ${env:MY_POD_IP}:14268
- < thrift_compact:
- < endpoint: ${env:MY_POD_IP}:6831
- ---
- > kafka:
- > brokers: kafka.observe.svc.cluster.local:9092
- > protocol_version: 2.0.0
- > topic: otel_agent
- > encoding: otlp_proto
- 116c202
- < endpoint: ${env:MY_POD_IP}:4317
- ---
- > endpoint: ${MY_POD_IP}:4317
- 118c204
- < endpoint: ${env:MY_POD_IP}:4318
- ---
- > endpoint: ${MY_POD_IP}:4318
- 126,128c212
- < - ${env:MY_POD_IP}:8888
- < zipkin:
- < endpoint: ${env:MY_POD_IP}:9411
- ---
- > - ${MY_POD_IP}:8888
- 132c216,218
- < address: ${env:MY_POD_IP}:8888
- ---
- > address: ${MY_POD_IP}:8888
- > logs:
- > level: "info"
- 135d220
- < - memory_ballast
- 139c224,227
- < - debug
- ---
- > - clickhouse
- > #- otlphttp/uptrace
- > #- kafka
- > #- otlp/col2
- 140a229
- > #- filter/ottl
- 145,153c234,246
- < metrics:
- < exporters:
- < - debug
- < processors:
- < - memory_limiter
- < - batch
- < receivers:
- < - otlp
- < - prometheus
- ---
- > #metrics:
- > #meexporters:
- > #me - clickhouse
- > #me - prometheus
- > #me #- otlphttp/uptrace
- > #me #- kafka
- > #me #- otlp/col2
- > #meprocessors:
- > #me - memory_limiter
- > #me - batch
- > #mereceivers:
- > #me - otlp
- > #me - prometheus
- 156c249,251
- < - debug
- ---
- > #- clickhouse
- > #- otlphttp/deepflow
- > - kafka
- 162,163c257
- < - jaeger
- < - zipkin
- ---
- > - kafka
- 167,170c261,265
- < repository: ""
- < pullPolicy: IfNotPresent
- < # Overrides the image tag whose default is the chart appVersion.
- < tag: ""
- ---
- > repository: otel/opentelemetry-collector-contrib
- > #repository: reg.cestong.com.cn/cecf/obcol
- > pullPolicy: Always
- > # tag: "latest"
- > tag: "0.108.0"
- 229c324
- < topologySpreadConstraints: []
- ---
- > topologySpreadConstraints: {}
- 235d329
- < extraEnvsFrom: []
- 246c340
- < hostPort: 4317
- ---
- > #hostPort: 4317
- 248c342
- < # nodePort: 30317
- ---
- > nodePort: 30317
- 254,272c348,349
- < hostPort: 4318
- < protocol: TCP
- < jaeger-compact:
- < enabled: true
- < containerPort: 6831
- < servicePort: 6831
- < hostPort: 6831
- < protocol: UDP
- < jaeger-thrift:
- < enabled: true
- < containerPort: 14268
- < servicePort: 14268
- < hostPort: 14268
- < protocol: TCP
- < jaeger-grpc:
- < enabled: true
- < containerPort: 14250
- < servicePort: 14250
- < hostPort: 14250
- ---
- > #hostPort: 4318
- > # nodePort: 30318
- 274c351
- < zipkin:
- ---
- > prome:
- 276,278c353,354
- < containerPort: 9411
- < servicePort: 9411
- < hostPort: 9411
- ---
- > containerPort: 8889
- > servicePort: 8889
- 283c359
- < enabled: false
- ---
- > enabled: true
- 289,295c365,373
- < resources: {}
- < # resources:
- < # limits:
- < # cpu: 250m
- < # memory: 512Mi
- <
- < podAnnotations: {}
- ---
- > resources:
- > limits:
- > cpu: 4
- > memory: 4096Mi
- >
- > podAnnotations:
- > prometheus.io/scrape: "true"
- > prometheus.io/path: /metrics
- > prometheus.io/port: "8889"
- 299,302d376
- < # Common labels to add to all otel-collector resources. Evaluated as a template.
- < additionalLabels: {}
- < # app.kubernetes.io/part-of: my-app
- <
- 306,312d379
- < # Adding entries to Pod /etc/hosts with HostAliases
- < # https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
- < hostAliases: []
- < # - ip: "1.2.3.4"
- < # hostnames:
- < # - "my.host.com"
- <
- 316,318d382
- < # Custom DNS config. Required when DNS policy is None.
- < dnsConfig: {}
- <
- 327,328c391
- < # List of extra sidecars to add.
- < # This also supports template content, which will eventually be converted to yaml.
- ---
- > # List of extra sidecars to add
- 343d405
- < # This also supports template content, which will eventually be converted to yaml.
- 376,411d437
- < # liveness probe configuration
- < # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
- < ##
- < livenessProbe:
- < # Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
- < # initialDelaySeconds: 1
- < # How often in seconds to perform the probe.
- < # periodSeconds: 10
- < # Number of seconds after which the probe times out.
- < # timeoutSeconds: 1
- < # Minimum consecutive failures for the probe to be considered failed after having succeeded.
- < # failureThreshold: 1
- < # Duration in seconds the pod needs to terminate gracefully upon probe failure.
- < # terminationGracePeriodSeconds: 10
- < httpGet:
- < port: 13133
- < path: /
- <
- < # readiness probe configuration
- < # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
- < ##
- < readinessProbe:
- < # Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
- < # initialDelaySeconds: 1
- < # How often (in seconds) to perform the probe.
- < # periodSeconds: 10
- < # Number of seconds after which the probe times out.
- < # timeoutSeconds: 1
- < # Minimum consecutive successes for the probe to be considered successful after having failed.
- < # successThreshold: 1
- < # Minimum consecutive failures for the probe to be considered failed after having succeeded.
- < # failureThreshold: 1
- < httpGet:
- < port: 13133
- < path: /
- <
- 413,418c439,440
- < # Enable the creation of a Service.
- < # By default, it's enabled on mode != daemonset.
- < # However, to enable it on mode = daemonset, its creation must be explicitly enabled
- < # enabled: true
- <
- < type: ClusterIP
- ---
- > #type: ClusterIP
- > type: NodePort
- 422,427d443
- <
- < # By default, Service of type 'LoadBalancer' will be created setting 'externalTrafficPolicy: Cluster'
- < # unless other value is explicitly set.
- < # Possible values are Cluster or Local (https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip)
- < # externalTrafficPolicy: Cluster
- <
- 430,434d445
- < # By default, Service will be created setting 'internalTrafficPolicy: Local' on mode = daemonset
- < # unless other value is explicitly set.
- < # Setting 'internalTrafficPolicy: Cluster' on a daemonset is not recommended
- < # internalTrafficPolicy: Cluster
- <
- 436,444c447,461
- < enabled: false
- < # annotations: {}
- < # ingressClassName: nginx
- < # hosts:
- < # - host: collector.example.com
- < # paths:
- < # - path: /
- < # pathType: Prefix
- < # port: 4318
- ---
- > enabled: true
- > annotations:
- > nginx.ingress.kubernetes.io/cors-allow-origin: "http://otel-demo.cestong.com.cn,http://localhost:3000,http://localhost:1234,http://localhost:5678,http://observe-front.cestong.com.cn"
- > nginx.ingress.kubernetes.io/enable-cors: "true"
- > nginx.ingress.kubernetes.io/cors-allow-headers: "*"
- > nginx.ingress.kubernetes.io/cors-allow-methods: "*"
- > nginx.ingress.kubernetes.io/cors-allow-credentials: "true"
- >
- > ingressClassName: nginx
- > hosts:
- > - host: otel-collector.cestong.com.cn
- > paths:
- > - path: /
- > pathType: Prefix
- > port: 4318
- 498c515
- < # autoscaling is used only if mode is "deployment" or "statefulset"
- ---
- > # autoscaling is used only if deployment enabled
- 503d519
- < behavior: {}
- 528,592d543
- < # Controls if and how PVCs created by the StatefulSet are deleted. Available in Kubernetes 1.23+.
- < persistentVolumeClaimRetentionPolicy:
- < enabled: false
- < whenDeleted: Retain
- < whenScaled: Retain
- <
- < networkPolicy:
- < enabled: false
- <
- < # Annotations to add to the NetworkPolicy
- < annotations: {}
- <
- < # Configure the 'from' clause of the NetworkPolicy.
- < # By default this will restrict traffic to ports enabled for the Collector. If
- < # you wish to further restrict traffic to other hosts or specific namespaces,
- < # see the standard NetworkPolicy 'spec.ingress.from' definition for more info:
- < # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
- < allowIngressFrom: []
- < # # Allow traffic from any pod in any namespace, but not external hosts
- < # - namespaceSelector: {}
- < # # Allow external access from a specific cidr block
- < # - ipBlock:
- < # cidr: 192.168.1.64/32
- < # # Allow access from pods in specific namespaces
- < # - namespaceSelector:
- < # matchExpressions:
- < # - key: kubernetes.io/metadata.name
- < # operator: In
- < # values:
- < # - "cats"
- < # - "dogs"
- <
- < # Add additional ingress rules to specific ports
- < # Useful to allow external hosts/services to access specific ports
- < # An example is allowing an external prometheus server to scrape metrics
- < #
- < # See the standard NetworkPolicy 'spec.ingress' definition for more info:
- < # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
- < extraIngressRules: []
- < # - ports:
- < # - port: metrics
- < # protocol: TCP
- < # from:
- < # - ipBlock:
- < # cidr: 192.168.1.64/32
- <
- < # Restrict egress traffic from the OpenTelemetry collector pod
- < # See the standard NetworkPolicy 'spec.egress' definition for more info:
- < # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
- < egressRules: []
- < # - to:
- < # - namespaceSelector: {}
- < # - ipBlock:
- < # cidr: 192.168.10.10/24
- < # ports:
- < # - port: 1234
- < # protocol: TCP
- <
- < # When enabled, the chart will set the GOMEMLIMIT env var to 80% of the configured
- < # resources.limits.memory and remove the memory ballast extension.
- < # If no resources.limits.memory are defined enabling does nothing.
- < # In a future release this setting will be enabled by default.
- < # See https://github.com/open-telemetry/opentelemetry-helm-charts/issues/891
- < # for more details.
- < useGOMEMLIMIT: true
|