env:
  java:
    opts:
      # NOTE(review): this key had an empty value in the reviewed copy. The
      # stock Flink template ships JVM --add-opens/--add-exports flags here;
      # confirm the empty value is intentional before deploying on JDK 17+.
      all:

jobmanager:
  # The host interface the JobManager process binds to. localhost prevents
  # access from outside this machine/container.
  bind-host: localhost
  rpc:
    # The externally reachable address of the JobManager.
    address: localhost
    # The RPC port where the JobManager is reachable.
    port: 6123
  memory:
    process:
      # Total process memory size for the JobManager (heap + off-heap + overhead).
      size: 1600m
  execution:
    # On task failure, restart only the affected pipelined region instead of
    # the whole job graph.
    failover-strategy: region

taskmanager:
  # The host interface the TaskManager process binds to.
  bind-host: localhost
  # The externally reachable address of this TaskManager.
  host: localhost
  # Number of parallel task slots offered by each TaskManager.
  numberOfTaskSlots: 3
  memory:
    process:
      # Total process memory size for each TaskManager.
      size: 1728m

parallelism:
  # Default parallelism for programs that do not set one explicitly.
  default: 3

kubernetes:
  # cluster-id: learn
  # Kubernetes namespace in which the Flink cluster resources are created.
  namespace: data
  #rest-service:
  #  exposed:
  #    type: NodePort

#==============================================================================
# High Availability
#==============================================================================

# high-availability:
#   # The high-availability mode. Possible options are 'NONE' or 'zookeeper'.
#   type: zookeeper
#   # The path where metadata for master recovery is persisted. While ZooKeeper stores
#   # the small ground truth for checkpoint and leader election, this location stores
#   # the larger objects, like persisted dataflow graphs.
#   #
#   # Must be a durable file system that is accessible from all nodes
#   # (like HDFS, S3, Ceph, nfs, ...)
#   storageDir: hdfs:///flink/ha/
#   zookeeper:
#     # The list of ZooKeeper quorum peers that coordinate the high-availability
#     # setup. This must be a list of the form:
#     # "host1:clientPort,host2:clientPort,..." (default clientPort: 2181)
#     quorum: localhost:2181
#     client:
#       # ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes
#       # It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE)
#       # The default value is "open" and it can be changed to "creator" if ZK security is enabled
#       acl: open

#==============================================================================
# Fault tolerance and checkpointing
#==============================================================================

# The backend that will be used to store operator state checkpoints if
# checkpointing is enabled. Checkpointing is enabled when
# execution.checkpointing.interval > 0.

# # Execution checkpointing related parameters. Please refer to CheckpointConfig
# # and ExecutionCheckpointingOptions for more details.
# execution:
#   checkpointing:
#     interval: 3min
#     externalized-checkpoint-retention: [DELETE_ON_CANCELLATION, RETAIN_ON_CANCELLATION]
#     max-concurrent-checkpoints: 1
#     min-pause: 0
#     mode: [EXACTLY_ONCE, AT_LEAST_ONCE]
#     timeout: 10min
#     tolerable-failed-checkpoints: 0
#     unaligned: false

# state:
#   backend:
#     # Supported backends are 'hashmap', 'rocksdb', or the
#     # <class-name-of-factory>.
#     type: hashmap
#     # Flag to enable/disable incremental checkpoints for backends that
#     # support incremental checkpoints (like the RocksDB state backend).
#     incremental: false
#   checkpoints:
#     # Directory for checkpoints filesystem, when using any of the default bundled
#     # state backends.
#     dir: hdfs://namenode-host:port/flink-checkpoints
#   savepoints:
#     # Default target directory for savepoints, optional.
#     dir: hdfs://namenode-host:port/flink-savepoints

#==============================================================================
# Rest & web frontend
#==============================================================================

rest:
  # The address to which the REST client will connect to
  address: localhost
  # The address that the REST & web server binds to
  # By default, this is localhost, which prevents the REST & web server from
  # being able to communicate outside of the machine/container it is running on.
  #
  # To enable this, set the bind address to one that has access to outside-facing
  # network interface, such as 0.0.0.0.
  bind-address: localhost
  # # The port to which the REST client connects to. If rest.bind-port has
  # # not been specified, then the server will bind to this port as well.
  # port: 8081
  # # Port range for the REST and web server to bind to.
  # bind-port: 8080-8090

# web:
#   submit:
#     # Flag to specify whether job submission is enabled from the web-based
#     # runtime monitor. Uncomment to disable.
#     enable: false
#   cancel:
#     # Flag to specify whether job cancellation is enabled from the web-based
#     # runtime monitor. Uncomment to disable.
#     enable: false

#==============================================================================
# Advanced
#==============================================================================

# io:
#   tmp:
#     # Override the directories for temporary files. If not specified, the
#     # system-specific Java temporary directory (java.io.tmpdir property) is taken.
#     #
#     # For framework setups on Yarn, Flink will automatically pick up the
#     # containers' temp directories without any need for configuration.
#     #
#     # Add a delimited list for multiple directories, using the system directory
#     # delimiter (colon ':' on unix) or a comma, e.g.:
#     # /data1/tmp:/data2/tmp:/data3/tmp
#     #
#     # Note: Each directory entry is read from and written to by a different I/O
#     # thread. You can include the same directory multiple times in order to create
#     # multiple I/O threads against that directory. This is for example relevant for
#     # high-throughput RAIDs.
#     dirs: /tmp

# classloader:
#   resolve:
#     # The classloading resolve order. Possible values are 'child-first' (Flink's default)
#     # and 'parent-first' (Java's default).
#     #
#     # Child first classloading allows users to use different dependency/library
#     # versions in their application than those in the classpath. Switching back
#     # to 'parent-first' may help with debugging dependency issues.
#     order: child-first

# The amount of memory going to the network stack. These numbers usually need
# no tuning. Adjusting them may be necessary in case of an "Insufficient number
# of network buffers" error. The default min is 64MB, the default max is 1GB.
#
# taskmanager:
#   memory:
#     network:
#       fraction: 0.1
#       min: 64mb
#       max: 1gb

#==============================================================================
# Flink Cluster Security Configuration
#==============================================================================

# Kerberos authentication for various components - Hadoop, ZooKeeper, and connectors -
# may be enabled in four steps:
# 1. configure the local krb5.conf file
# 2. provide Kerberos credentials (either a keytab or a ticket cache w/ kinit)
# 3. make the credentials available to various JAAS login contexts
# 4. configure the connector to use JAAS/SASL

# # The below configure how Kerberos credentials are provided. A keytab will be used instead of
# # a ticket cache if the keytab path and principal are set.
# security:
#   kerberos:
#     login:
#       use-ticket-cache: true
#       keytab: /path/to/kerberos/keytab
#       principal: flink-user
#       # The configuration below defines which JAAS login contexts
#       contexts: Client,KafkaClient

#==============================================================================
# ZK Security Configuration
#==============================================================================

# zookeeper:
#   sasl:
#     # Below configurations are applicable if ZK ensemble is configured for security
#     #
#     # Override below configuration to provide custom ZK service name if configured
#     # zookeeper.sasl.service-name: zookeeper
#     #
#     # The configuration below must match one of the values set in "security.kerberos.login.contexts"
#     login-context-name: Client

#==============================================================================
# HistoryServer
#==============================================================================

# The HistoryServer is started and stopped via bin/historyserver.sh (start|stop)
#
# jobmanager:
#   archive:
#     fs:
#       # Directory to upload completed jobs to. Add this directory to the list of
#       # monitored directories of the HistoryServer as well (see below).
#       dir: hdfs:///completed-jobs/

# historyserver:
#   web:
#     # The address under which the web-based HistoryServer listens.
#     address: 0.0.0.0
#     # The port under which the web-based HistoryServer listens.
#     port: 8082
#   archive:
#     fs:
#       # Comma separated list of directories to monitor for completed jobs.
#       dir: hdfs:///completed-jobs/
#       # Interval in milliseconds for refreshing the monitored directories.
#       fs.refresh-interval: 10000