env:
  java:
    opts:
      all: "-Dsource.pb.enable=true -Dsource.pb.kafka.brokers=kafka.observe.svc.cluster.local:9092 -Dsource.pb.kafka.topics=otelproto -DMYSQL_DSN=jdbc:mysql://mysql.observe.svc.cluster.local:3306/observe -DtraceURL.tableName=otel.otel_traces_url_local"
      #all: "-DKAFKA_BROKERS=kafka.observe.svc.cluster.local:9092 -DKAFKA_TOPIC=otelproto -DMYSQL_DSN=jdbc:mysql://mysql.observe.svc.cluster.local:3306/observe -Di6000.kafka.bootstrap-server=kafka.observe.svc.cluster.local:9092 -Di6000.kafka.topic=i6000_trace -Di6000.include_app_names=SPRING,OBSERVE -DtraceURL.tableName=otel.otel_traces_url_local"
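      # Note: the -D properties in 'all' above appear to wire up the job's protobuf
      # Kafka source (brokers and the 'otelproto' topic) and its MySQL connection
      # (DSN plus the otel.otel_traces_url_local trace-URL table); the commented
      # 'all' line is an alternative property set kept for reference. These
      # descriptions are inferred from the property names only.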

#==============================================================================
# Common
#==============================================================================
jobmanager:
  # The host interface the JobManager will bind to. By default, this is localhost, and will prevent
  # the JobManager from communicating outside the machine/container it is running on.
  # On YARN this setting will be ignored if it is set to 'localhost', defaulting to 0.0.0.0.
  # On Kubernetes this setting will be ignored, defaulting to 0.0.0.0.
  #
  # To enable this, set the bind-host address to one that has access to an outside-facing network
  # interface, such as 0.0.0.0.
  bind-host: localhost
  rpc:
    # The external address of the host on which the JobManager runs and can be
    # reached by the TaskManagers and any clients which want to connect. This setting
    # is only used in Standalone mode and may be overwritten on the JobManager side
    # by specifying the --host <hostname> parameter of the bin/jobmanager.sh executable.
    # In high availability mode, if you use the bin/start-cluster.sh script and set up
    # the conf/masters file, this will be taken care of automatically. YARN
    # automatically configures the host name based on the hostname of the node where the
    # JobManager runs.
    address: localhost
    # The RPC port where the JobManager is reachable.
    port: 6123
  memory:
    process:
      # The total process memory size for the JobManager.
      # Note this accounts for all memory usage within the JobManager process, including JVM metaspace and other overhead.
      size: 1600m
  execution:
    # The failover strategy, i.e., how the job computation recovers from task failures.
    # Only restart tasks that may have been affected by the task failure, which typically includes
    # downstream tasks and potentially upstream tasks if their produced data is no longer available for consumption.
    failover-strategy: region

taskmanager:
  # The host interface the TaskManager will bind to. By default, this is localhost, and will prevent
  # the TaskManager from communicating outside the machine/container it is running on.
  # On YARN this setting will be ignored if it is set to 'localhost', defaulting to 0.0.0.0.
  # On Kubernetes this setting will be ignored, defaulting to 0.0.0.0.
  #
  # To enable this, set the bind-host address to one that has access to an outside-facing network
  # interface, such as 0.0.0.0.
  bind-host: localhost
  # The address of the host on which the TaskManager runs and can be reached by the JobManager and
  # other TaskManagers. If not specified, the TaskManager will try different strategies to identify
  # the address.
  #
  # Note this address needs to be reachable by the JobManager and forward traffic to one of
  # the interfaces the TaskManager is bound to (see 'taskmanager.bind-host').
  #
  # Note also that unless all TaskManagers are running on the same machine, this address needs to be
  # configured separately for each TaskManager.
  host: localhost
  # The number of task slots that each TaskManager offers. Each slot runs one parallel pipeline.
  numberOfTaskSlots: 2
  memory:
    task:
      off-heap:
        size: 256m
    network:
      fraction: 0.2
    process:
      # The total process memory size for the TaskManager.
      #
      # Note this accounts for all memory usage within the TaskManager process, including JVM metaspace and other overhead.
      # To exclude JVM metaspace and overhead, please use total Flink memory size instead of 'taskmanager.memory.process.size'.
      # It is not recommended to set both 'taskmanager.memory.process.size' and Flink memory.
      size: 4gb
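      # Note: compared to the defaults, this reserves 256m of task off-heap memory and
      # raises the network memory fraction from 0.1 (see the commented example in the
      # Advanced section below) to 0.2 within the 4gb process budget.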

parallelism:
  # The parallelism used for programs that did not specify any other parallelism.
  default: 3

# # The default file system scheme and authority.
# # By default file paths without scheme are interpreted relative to the local
# # root file system 'file:///'. Use this to override the default and interpret
# # relative paths relative to a different file system,
# # for example 'hdfs://mynamenode:12345'
# fs:
#   default-scheme: hdfs://mynamenode:12345

#==============================================================================
# High Availability
#==============================================================================

# high-availability:
#   # The high-availability mode. Possible options are 'NONE' or 'zookeeper'.
#   type: zookeeper
#   # The path where metadata for master recovery is persisted. While ZooKeeper stores
#   # the small ground truth for checkpoint and leader election, this location stores
#   # the larger objects, like persisted dataflow graphs.
#   #
#   # Must be a durable file system that is accessible from all nodes
#   # (like HDFS, S3, Ceph, nfs, ...)
#   storageDir: hdfs:///flink/ha/
#   zookeeper:
#     # The list of ZooKeeper quorum peers that coordinate the high-availability
#     # setup. This must be a list of the form:
#     # "host1:clientPort,host2:clientPort,..." (default clientPort: 2181)
#     quorum: localhost:2181
#     client:
#       # ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes
#       # It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE)
#       # The default value is "open" and it can be changed to "creator" if ZK security is enabled
#       acl: open

#==============================================================================
# Fault tolerance and checkpointing
#==============================================================================

# The backend that will be used to store operator state checkpoints if
# checkpointing is enabled. Checkpointing is enabled when execution.checkpointing.interval > 0.

# # Execution checkpointing related parameters. Please refer to CheckpointConfig and ExecutionCheckpointingOptions for more details.
# execution:
#   checkpointing:
#     interval: 3min
#     externalized-checkpoint-retention: [DELETE_ON_CANCELLATION, RETAIN_ON_CANCELLATION]
#     max-concurrent-checkpoints: 1
#     min-pause: 0
#     mode: [EXACTLY_ONCE, AT_LEAST_ONCE]
#     timeout: 10min
#     tolerable-failed-checkpoints: 0
#     unaligned: false

# state:
#   backend:
#     # Supported backends are 'hashmap', 'rocksdb', or the
#     # <class-name-of-factory>.
#     type: hashmap
#     # Flag to enable/disable incremental checkpoints for backends that
#     # support incremental checkpoints (like the RocksDB state backend).
#     incremental: false
#   checkpoints:
#     # Directory for checkpoints filesystem, when using any of the default bundled
#     # state backends.
#     dir: hdfs://namenode-host:port/flink-checkpoints
#   savepoints:
#     # Default target directory for savepoints, optional.
#     dir: hdfs://namenode-host:port/flink-savepoints

#==============================================================================
# Rest & web frontend
#==============================================================================

rest:
  # The address to which the REST client will connect
  address: localhost
  # The address that the REST & web server binds to
  # By default, this is localhost, which prevents the REST & web server from
  # being able to communicate outside of the machine/container it is running on.
  #
  # To enable this, set the bind address to one that has access to an outside-facing
  # network interface, such as 0.0.0.0.
  bind-address: localhost
  # # The port to which the REST client connects. If rest.bind-port has
  # # not been specified, then the server will bind to this port as well.
  # port: 8081
  # # Port range for the REST and web server to bind to.
  # bind-port: 8080-8090

# web:
#   submit:
#     # Flag to specify whether job submission is enabled from the web-based
#     # runtime monitor. Uncomment to disable.
#     enable: false
#   cancel:
#     # Flag to specify whether job cancellation is enabled from the web-based
#     # runtime monitor. Uncomment to disable.
#     enable: false

#==============================================================================
# Advanced
#==============================================================================

# io:
#   tmp:
#     # Override the directories for temporary files. If not specified, the
#     # system-specific Java temporary directory (java.io.tmpdir property) is taken.
#     #
#     # For framework setups on Yarn, Flink will automatically pick up the
#     # containers' temp directories without any need for configuration.
#     #
#     # Add a delimited list for multiple directories, using the system directory
#     # delimiter (colon ':' on unix) or a comma, e.g.:
#     # /data1/tmp:/data2/tmp:/data3/tmp
#     #
#     # Note: Each directory entry is read from and written to by a different I/O
#     # thread. You can include the same directory multiple times in order to create
#     # multiple I/O threads against that directory. This is for example relevant for
#     # high-throughput RAIDs.
#     dirs: /tmp

# classloader:
#   resolve:
#     # The classloading resolve order. Possible values are 'child-first' (Flink's default)
#     # and 'parent-first' (Java's default).
#     #
#     # Child first classloading allows users to use different dependency/library
#     # versions in their application than those in the classpath. Switching back
#     # to 'parent-first' may help with debugging dependency issues.
#     order: child-first

# The amount of memory going to the network stack. These numbers usually need
# no tuning. Adjusting them may be necessary in case of an "Insufficient number
# of network buffers" error. The default min is 64MB, the default max is 1GB.
#
# taskmanager:
#   memory:
#     network:
#       fraction: 0.1
#       min: 64mb
#       max: 1gb

#==============================================================================
# Flink Cluster Security Configuration
#==============================================================================

# Kerberos authentication for various components - Hadoop, ZooKeeper, and connectors -
# may be enabled in four steps:
# 1. configure the local krb5.conf file
# 2. provide Kerberos credentials (either a keytab or a ticket cache w/ kinit)
# 3. make the credentials available to various JAAS login contexts
# 4. configure the connector to use JAAS/SASL

# # The settings below configure how Kerberos credentials are provided. A keytab will be used instead of
# # a ticket cache if the keytab path and principal are set.
# security:
#   kerberos:
#     login:
#       use-ticket-cache: true
#       keytab: /path/to/kerberos/keytab
#       principal: flink-user
#       # The configuration below defines which JAAS login contexts
#       contexts: Client,KafkaClient

#==============================================================================
# ZK Security Configuration
#==============================================================================

# zookeeper:
#   sasl:
#     # Below configurations are applicable if ZK ensemble is configured for security
#     #
#     # Override below configuration to provide custom ZK service name if configured
#     # zookeeper.sasl.service-name: zookeeper
#     #
#     # The configuration below must match one of the values set in "security.kerberos.login.contexts"
#     login-context-name: Client

#==============================================================================
# HistoryServer
#==============================================================================

# The HistoryServer is started and stopped via bin/historyserver.sh (start|stop)
#
# jobmanager:
#   archive:
#     fs:
#       # Directory to upload completed jobs to. Add this directory to the list of
#       # monitored directories of the HistoryServer as well (see below).
#       dir: hdfs:///completed-jobs/

# historyserver:
#   web:
#     # The address under which the web-based HistoryServer listens.
#     address: 0.0.0.0
#     # The port under which the web-based HistoryServer listens.
#     port: 8082
#   archive:
#     fs:
#       # Comma separated list of directories to monitor for completed jobs.
#       dir: hdfs:///completed-jobs/
#     # Interval in milliseconds for refreshing the monitored directories.
#     fs.refresh-interval: 10000
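
# The Prometheus reporter below exposes JobManager/TaskManager metrics over HTTP on
# port 9249 (matching the prometheus.io annotations in the kubernetes section below);
# 'scope.variables.additional' attaches a constant biz=ob-agent-stream label to every
# metric reported by this reporter.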
metrics:
  reporter:
    prom:
      port: 9249
      factory:
        class: org.apache.flink.metrics.prometheus.PrometheusReporterFactory
      scope:
        variables:
          additional:
            biz: ob-agent-stream
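
# Native Kubernetes deployment settings: TaskManager pods are created from the pod
# template at ./conf/taskmanager-pod-template.yaml, the JobManager/TaskManager are
# allocated 2.0/4.0 CPUs, and the TaskManager pods carry Prometheus scrape annotations
# pointing at the metrics reporter port 9249 configured above.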
kubernetes:
  # rest-service:
  #   exposed:
  #     type: NodePort
  pod-template-file:
    taskmanager: ./conf/taskmanager-pod-template.yaml
  jobmanager:
    cpu:
      amount: 2.0
  taskmanager:
    cpu:
      amount: 4.0
    annotations:
      prometheus.io/scrape: true
      prometheus.io/port: 9249
      prometheus.io/path: /
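
# Serializer registration: the OpenTelemetry ExportTraceServiceRequest protobuf class
# is registered with Kryo using com.twitter.chill.protobuf.ProtobufSerializer, since
# Kryo cannot handle protobuf-generated classes out of the box. This relies on the
# chill-protobuf dependency being available on the job's classpath.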
pipeline:
  serialization-config:
    - io.opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest:
        type: kryo
        kryo-type: registered
        class: com.twitter.chill.protobuf.ProtobufSerializer