# bak-conf.yaml — backup of the Flink cluster configuration (conf/config.yaml).
# NOTE(review): this file was restored from a numbered-list export; the
# original byte size was reported as 8.4 KB.
  1. env:
  2. java:
  3. opts:
  4. all:
  5. jobmanager:
  6. bind-host: localhost
  7. rpc:
  8. address: localhost
  9. port: 6123
  10. memory:
  11. process:
  12. size: 1600m
  13. execution:
  14. failover-strategy: region
  15. taskmanager:
  16. bind-host: localhost
  17. host: localhost
  18. numberOfTaskSlots: 3
  19. memory:
  20. process:
  21. size: 1728m
  22. parallelism:
  23. default: 3
  24. kubernetes:
  25. # cluster-id: learn
  26. namespace: data
  27. #rest-service:
  28. # exposed:
  29. # type: NodePort
  30. #==============================================================================
  31. # High Availability
  32. #==============================================================================
  33. # high-availability:
  34. # # The high-availability mode. Possible options are 'NONE' or 'zookeeper'.
  35. # type: zookeeper
  36. # # The path where metadata for master recovery is persisted. While ZooKeeper stores
  37. # # the small ground truth for checkpoint and leader election, this location stores
  38. # # the larger objects, like persisted dataflow graphs.
  39. # #
  40. # # Must be a durable file system that is accessible from all nodes
  41. # # (like HDFS, S3, Ceph, nfs, ...)
  42. # storageDir: hdfs:///flink/ha/
  43. # zookeeper:
  44. # # The list of ZooKeeper quorum peers that coordinate the high-availability
  45. # # setup. This must be a list of the form:
  46. # # "host1:clientPort,host2:clientPort,..." (default clientPort: 2181)
  47. # quorum: localhost:2181
  48. # client:
  49. # # ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes
  50. # # It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE)
  51. # # The default value is "open" and it can be changed to "creator" if ZK security is enabled
  52. # acl: open
  53. #==============================================================================
  54. # Fault tolerance and checkpointing
  55. #==============================================================================
  56. # The backend that will be used to store operator state checkpoints if
  57. # checkpointing is enabled. Checkpointing is enabled when execution.checkpointing.interval > 0.
  58. # # Execution checkpointing related parameters. Please refer to CheckpointConfig and ExecutionCheckpointingOptions for more details.
  59. # execution:
  60. # checkpointing:
  61. # interval: 3min
  62. # externalized-checkpoint-retention: [DELETE_ON_CANCELLATION, RETAIN_ON_CANCELLATION]
  63. # max-concurrent-checkpoints: 1
  64. # min-pause: 0
  65. # mode: [EXACTLY_ONCE, AT_LEAST_ONCE]
  66. # timeout: 10min
  67. # tolerable-failed-checkpoints: 0
  68. # unaligned: false
  69. # state:
  70. # backend:
  71. # # Supported backends are 'hashmap', 'rocksdb', or the
  72. # # <class-name-of-factory>.
  73. # type: hashmap
  74. # # Flag to enable/disable incremental checkpoints for backends that
  75. # # support incremental checkpoints (like the RocksDB state backend).
  76. # incremental: false
  77. # checkpoints:
  78. # # Directory for checkpoints filesystem, when using any of the default bundled
  79. # # state backends.
  80. # dir: hdfs://namenode-host:port/flink-checkpoints
  81. # savepoints:
  82. # # Default target directory for savepoints, optional.
  83. # dir: hdfs://namenode-host:port/flink-savepoints
  84. #==============================================================================
  85. # Rest & web frontend
  86. #==============================================================================
  87. rest:
  88. # The address to which the REST client will connect to
  89. address: localhost
  90. # The address that the REST & web server binds to
  91. # By default, this is localhost, which prevents the REST & web server from
  92. # being able to communicate outside of the machine/container it is running on.
  93. #
  94. # To enable this, set the bind address to one that has access to outside-facing
  95. # network interface, such as 0.0.0.0.
  96. bind-address: localhost
  97. # # The port to which the REST client connects to. If rest.bind-port has
  98. # # not been specified, then the server will bind to this port as well.
  99. # port: 8081
  100. # # Port range for the REST and web server to bind to.
  101. # bind-port: 8080-8090
  102. # web:
  103. # submit:
  104. # # Flag to specify whether job submission is enabled from the web-based
  105. # # runtime monitor. Uncomment to disable.
  106. # enable: false
  107. # cancel:
  108. # # Flag to specify whether job cancellation is enabled from the web-based
  109. # # runtime monitor. Uncomment to disable.
  110. # enable: false
  111. #==============================================================================
  112. # Advanced
  113. #==============================================================================
  114. # io:
  115. # tmp:
  116. # # Override the directories for temporary files. If not specified, the
  117. # # system-specific Java temporary directory (java.io.tmpdir property) is taken.
  118. # #
  119. # # For framework setups on Yarn, Flink will automatically pick up the
  120. # # containers' temp directories without any need for configuration.
  121. # #
  122. # # Add a delimited list for multiple directories, using the system directory
  123. # # delimiter (colon ':' on unix) or a comma, e.g.:
  124. # # /data1/tmp:/data2/tmp:/data3/tmp
  125. # #
  126. # # Note: Each directory entry is read from and written to by a different I/O
  127. # # thread. You can include the same directory multiple times in order to create
  128. # # multiple I/O threads against that directory. This is for example relevant for
  129. # # high-throughput RAIDs.
  130. # dirs: /tmp
  131. # classloader:
  132. # resolve:
  133. # # The classloading resolve order. Possible values are 'child-first' (Flink's default)
  134. # # and 'parent-first' (Java's default).
  135. # #
  136. # # Child first classloading allows users to use different dependency/library
  137. # # versions in their application than those in the classpath. Switching back
  138. # # to 'parent-first' may help with debugging dependency issues.
  139. # order: child-first
  140. # The amount of memory going to the network stack. These numbers usually need
  141. # no tuning. Adjusting them may be necessary in case of an "Insufficient number
  142. # of network buffers" error. The default min is 64MB, the default max is 1GB.
  143. #
  144. # taskmanager:
  145. # memory:
  146. # network:
  147. # fraction: 0.1
  148. # min: 64mb
  149. # max: 1gb
  150. #==============================================================================
  151. # Flink Cluster Security Configuration
  152. #==============================================================================
  153. # Kerberos authentication for various components - Hadoop, ZooKeeper, and connectors -
  154. # may be enabled in four steps:
  155. # 1. configure the local krb5.conf file
  156. # 2. provide Kerberos credentials (either a keytab or a ticket cache w/ kinit)
  157. # 3. make the credentials available to various JAAS login contexts
  158. # 4. configure the connector to use JAAS/SASL
  159. # # The below configure how Kerberos credentials are provided. A keytab will be used instead of
  160. # # a ticket cache if the keytab path and principal are set.
  161. # security:
  162. # kerberos:
  163. # login:
  164. # use-ticket-cache: true
  165. # keytab: /path/to/kerberos/keytab
  166. # principal: flink-user
  167. # # The configuration below defines which JAAS login contexts
  168. # contexts: Client,KafkaClient
  169. #==============================================================================
  170. # ZK Security Configuration
  171. #==============================================================================
  172. # zookeeper:
  173. # sasl:
  174. # # Below configurations are applicable if ZK ensemble is configured for security
  175. # #
  176. # # Override below configuration to provide custom ZK service name if configured
  177. # # zookeeper.sasl.service-name: zookeeper
  178. # #
  179. # # The configuration below must match one of the values set in "security.kerberos.login.contexts"
  180. # login-context-name: Client
  181. #==============================================================================
  182. # HistoryServer
  183. #==============================================================================
  184. # The HistoryServer is started and stopped via bin/historyserver.sh (start|stop)
  185. #
  186. # jobmanager:
  187. # archive:
  188. # fs:
  189. # # Directory to upload completed jobs to. Add this directory to the list of
  190. # # monitored directories of the HistoryServer as well (see below).
  191. # dir: hdfs:///completed-jobs/
  192. # historyserver:
  193. # web:
  194. # # The address under which the web-based HistoryServer listens.
  195. # address: 0.0.0.0
  196. # # The port under which the web-based HistoryServer listens.
  197. # port: 8082
  198. # archive:
  199. # fs:
  200. # # Comma separated list of directories to monitor for completed jobs.
  201. # dir: hdfs:///completed-jobs/
  202. # # Interval in milliseconds for refreshing the monitored directories.
  203. # fs.refresh-interval: 10000