# Copyright 2020 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
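
# ConfigMap holding the Hadoop configuration files (core-site.xml, hdfs-site.xml,
# hadoop-policy.xml and log4j.properties) used by the HDFS pods in this chart.
# Extra site properties can be supplied from values.yaml via config.coreSite and
# config.hdfsSite, for example (illustrative values only):
#
#   config:
#     coreSite:
#       hadoop.tmp.dir: /data/hadoop-tmp
#     hdfsSite:
#       dfs.replication: "3"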
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "hdfs.fullname" . }}
  labels:
{{ include "hdfs.labels" . | indent 4 }}
data:
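  # core-site.xml: points HDFS clients at the NameNode via fs.defaultFS; any
  # key/value pairs under .Values.config.coreSite are appended as extra properties.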
  core-site.xml: |-
    <configuration>
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://{{ template "hdfs.fullname" . }}-namenode-0.{{ template "hdfs.fullname" . }}-namenodes:{{ .Values.namenode.ports.clientRpc }}</value>
      </property>
      {{- range $k, $v := .Values.config.coreSite }}
      <property>
        <name>{{$k}}</name>
        <value>{{$v}}</value>
      </property>
      {{- end }}
    </configuration>
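  # hdfs-site.xml: NameNode/DataNode storage directories, RPC/HTTP bind addresses
  # and, when config.rackAwareness.nodeTopologyLabel is set, a topology script so
  # HDFS can place block replicas rack-aware. Extra properties come from
  # .Values.config.hdfsSite.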
  hdfs-site.xml: |-
    <configuration>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>{{- template "hdfs.namenode.volumes" .Values.namenode.dataVolumes.count -}}</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>{{- template "hdfs.datanode.volumes" .Values.datanode.dataVolumes.count -}}</value>
      </property>
      {{- range $k, $v := .Values.config.hdfsSite }}
      <property>
        <name>{{$k}}</name>
        <value>{{$v}}</value>
      </property>
      {{- end }}
      {{- if .Values.config.rackAwareness }}
      {{- if .Values.config.rackAwareness.nodeTopologyLabel }}
      <property>
        <name>net.topology.script.file.name</name>
        <value>/scripts/resolve-rack.sh</value>
      </property>
      <property>
        <name>net.topology.script.number.args</name>
        <value>1</value>
      </property>
      {{- end }}
      {{- end }}
      <property>
        <name>dfs.namenode.rpc-address</name>
        <value>{{ template "hdfs.fullname" . }}-namenode-0.{{ template "hdfs.fullname" . }}-namenodes:{{ .Values.namenode.ports.clientRpc }}</value>
      </property>
      <property>
        <name>dfs.namenode.rpc-bind-host</name>
        <value>0.0.0.0</value>
      </property>
      <property>
        <name>dfs.namenode.servicerpc-address</name>
        <value>{{ template "hdfs.fullname" . }}-namenode-0.{{ template "hdfs.fullname" . }}-namenodes:{{ .Values.namenode.ports.serviceRpc }}</value>
      </property>
      <property>
        <name>dfs.namenode.servicerpc-bind-host</name>
        <value>0.0.0.0</value>
      </property>
      <property>
        <name>dfs.namenode.lifeline.rpc-address</name>
        <value>{{ template "hdfs.fullname" . }}-namenode-0.{{ template "hdfs.fullname" . }}-namenodes:{{ .Values.namenode.ports.lifelineRpc }}</value>
      </property>
      <property>
        <name>dfs.namenode.lifeline.rpc-bind-host</name>
        <value>0.0.0.0</value>
      </property>
      <property>
        <name>dfs.namenode.http-address</name>
        <value>0.0.0.0:{{ .Values.namenode.ports.http }}</value>
      </property>
      <property>
        <name>dfs.namenode.https-address</name>
        <value>0.0.0.0:{{ .Values.namenode.ports.https }}</value>
      </property>
      <property>
        <name>dfs.datanode.http.address</name>
        <value>0.0.0.0:{{ .Values.datanode.ports.http }}</value>
      </property>
      <property>
        <name>dfs.datanode.https.address</name>
        <value>0.0.0.0:{{ .Values.datanode.ports.https }}</value>
      </property>
      <property>
        <name>dfs.datanode.address</name>
        <value>0.0.0.0:{{ .Values.datanode.ports.data }}</value>
      </property>
      <property>
        <name>dfs.datanode.ipc.address</name>
        <value>0.0.0.0:{{ .Values.datanode.ports.ipc }}</value>
      </property>
    </configuration>
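  # hadoop-policy.xml: service-level authorization ACLs. "*" allows any user;
  # "hadoop" restricts the internal protocols to the hadoop user.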
  hadoop-policy.xml: |-
    <configuration>
      <property>
        <name>security.client.protocol.acl</name>
        <value>*</value>
        <description>ACL for ClientProtocol, which is used by user code
          via the DistributedFileSystem.
          The ACL is a comma-separated list of user and group names. The user and
          group list is separated by a blank. For e.g. "alice,bob users,wheel".
          A special value of "*" means all users are allowed.</description>
      </property>
      <property>
        <name>security.client.datanode.protocol.acl</name>
        <value>*</value>
        <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
          for block recovery.
          The ACL is a comma-separated list of user and group names. The user and
          group list is separated by a blank. For e.g. "alice,bob users,wheel".
          A special value of "*" means all users are allowed.</description>
      </property>
      <property>
        <name>security.datanode.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for DatanodeProtocol, which is used by datanodes to
          communicate with the namenode.
          The ACL is a comma-separated list of user and group names. The user and
          group list is separated by a blank. For e.g. "alice,bob users,wheel".
          A special value of "*" means all users are allowed.</description>
      </property>
      <property>
        <name>security.inter.datanode.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
          for updating generation timestamp.
          The ACL is a comma-separated list of user and group names. The user and
          group list is separated by a blank. For e.g. "alice,bob users,wheel".
          A special value of "*" means all users are allowed.</description>
      </property>
      <property>
        <name>security.namenode.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for NamenodeProtocol, the protocol used by the secondary
          namenode to communicate with the namenode.
          The ACL is a comma-separated list of user and group names. The user and
          group list is separated by a blank. For e.g. "alice,bob users,wheel".
          A special value of "*" means all users are allowed.</description>
      </property>
      <property>
        <name>security.admin.operations.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for AdminOperationsProtocol. Used for admin commands.
          The ACL is a comma-separated list of user and group names. The user and
          group list is separated by a blank. For e.g. "alice,bob users,wheel".
          A special value of "*" means all users are allowed.</description>
      </property>
      <property>
        <name>security.refresh.user.mappings.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for RefreshUserMappingsProtocol. Used to refresh
          users mappings. The ACL is a comma-separated list of user and
          group names. The user and group list is separated by a blank. For
          e.g. "alice,bob users,wheel". A special value of "*" means all
          users are allowed.</description>
      </property>
      <property>
        <name>security.refresh.policy.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
          dfsadmin and mradmin commands to refresh the security policy in-effect.
          The ACL is a comma-separated list of user and group names. The user and
          group list is separated by a blank. For e.g. "alice,bob users,wheel".
          A special value of "*" means all users are allowed.</description>
      </property>
      <property>
        <name>security.ha.service.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for HAService protocol used by HAAdmin to manage the
          active and stand-by states of namenode.</description>
      </property>
      <property>
        <name>security.zkfc.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for access to the ZK Failover Controller</description>
      </property>
      <property>
        <name>security.qjournal.service.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for QJournalProtocol, used by the NN to communicate with
          JNs when using the QuorumJournalManager for edit logs.</description>
      </property>
      <property>
        <name>security.interqjournal.service.protocol.acl</name>
        <value>hadoop</value>
        <description>ACL for InterQJournalProtocol, used by the JN to
          communicate with other JN</description>
      </property>
    </configuration>
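  # log4j.properties: console logging plus rolling-file appenders for the security
  # log, the HDFS audit log and the NameNode/DataNode metrics logs.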
  log4j.properties: |-
    # Define some default values that can be overridden by system properties
    hadoop.root.logger=INFO,console
    hadoop.log.dir=.
    hadoop.log.file=hadoop.log
    # Define the root logger to the system property "hadoop.root.logger"
    log4j.rootLogger=${hadoop.root.logger}, EventCounter
    # Logging Threshold
    log4j.threshold=ALL
    # Null Appender
    log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
    # Console Appender
    log4j.appender.console=org.apache.log4j.ConsoleAppender
    log4j.appender.console.target=System.err
    log4j.appender.console.layout=org.apache.log4j.PatternLayout
    log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
    # HDFS block state change log from block manager
    # Uncomment the following to log normal block state change
    # messages from BlockManager in NameNode.
    #log4j.logger.BlockStateChange=DEBUG
    # Security appender
    hadoop.security.logger=INFO,RFAS
    hadoop.security.log.maxfilesize=256MB
    hadoop.security.log.maxbackupindex=20
    log4j.category.SecurityLogger=${hadoop.security.logger}
    hadoop.security.log.file=security.audit
    log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
    log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
    log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
    log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
    log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
    log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
    # hdfs audit logging
    hdfs.audit.logger=INFO,RFAAUDIT
    hdfs.audit.log.maxfilesize=256MB
    hdfs.audit.log.maxbackupindex=20
    log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
    log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
    log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
    log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
    log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
    log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
    log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
    log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
    # NameNode metrics logging.
    # The default is to retain two namenode-metrics.log files up to 64MB each.
    namenode.metrics.logger=INFO,NNMETRICSRFA
    log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
    log4j.additivity.NameNodeMetricsLog=false
    log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
    log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
    log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
    log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
    log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
    log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
    # DataNode metrics logging.
    # The default is to retain two datanode-metrics.log files up to 64MB each.
    datanode.metrics.logger=INFO,DNMETRICSRFA
    log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
    log4j.additivity.DataNodeMetricsLog=false
    log4j.appender.DNMETRICSRFA=org.apache.log4j.RollingFileAppender
    log4j.appender.DNMETRICSRFA.File=${hadoop.log.dir}/datanode-metrics.log
    log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
    log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
    log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
    log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
    # Custom Logging levels
    #log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
    # AWS SDK & S3A FileSystem
    #log4j.logger.com.amazonaws=ERROR
    log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
    #log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
    # Event Counter Appender
    # Sends counts of logging messages at different severity levels to Hadoop Metrics.
    log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
    # Log levels of third-party libraries
    log4j.logger.org.apache.commons.beanutils=WARN
    log4j.logger.org.apache.hadoop.security.ForwardAuthentication=DEBUG