#################################################################################################################
# The deployment for the rook operator
# Contains the common settings for most Kubernetes deployments.
# For example, to create the rook-ceph cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster.yaml
#
# Also see other operator sample files for variations of operator.yaml:
# - operator-openshift.yaml: Common settings for running in OpenShift
###############################################################################################################
# Rook Ceph Operator Config ConfigMap
# Use this ConfigMap to override Rook-Ceph Operator configurations.
# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
#       Operator Deployment.
# To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config
# here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-ceph-operator-config
  # should be in the namespace of the operator
  namespace: rook-ceph # namespace:operator
data:
  # The logging level for the operator: ERROR | WARNING | INFO | DEBUG
  ROOK_LOG_LEVEL: "INFO"
  # Allow using loop devices for osds in test clusters.
  ROOK_CEPH_ALLOW_LOOP_DEVICES: "false"
  # Enable the CSI driver.
  # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
  ROOK_CSI_ENABLE_CEPHFS: "true"
  # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
  ROOK_CSI_ENABLE_RBD: "true"
  # Enable the CSI NFS driver. To start another version of the CSI driver, see image properties below.
  ROOK_CSI_ENABLE_NFS: "false"
  # Set to true to enable Ceph CSI pvc encryption support.
  CSI_ENABLE_ENCRYPTION: "false"
  # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is significant drop in read/write performance.
  # CSI_ENABLE_HOST_NETWORK: "true"
  # Set to true to enable adding volume metadata on the CephFS subvolume and RBD images.
  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
  # Hence enable metadata is false by default.
  # CSI_ENABLE_METADATA: "true"
  # cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases
  # like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster.
  # CSI_CLUSTER_NAME: "my-prod-cluster"
  # Set logging level for cephCSI containers maintained by the cephCSI.
  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
  # CSI_LOG_LEVEL: "0"
  # Set logging level for Kubernetes-csi sidecar containers.
  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
  # CSI_SIDECAR_LOG_LEVEL: "0"
  # Set replicas for csi provisioner deployment.
  CSI_PROVISIONER_REPLICAS: "2"
  # OMAP generator will generate the omap mapping between the PV name and the RBD image.
  # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
  # By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable
  # it set it to false.
  # CSI_ENABLE_OMAP_GENERATOR: "false"
  # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
  CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
  # set to false to disable deployment of snapshotter container in NFS provisioner pod.
  CSI_ENABLE_NFS_SNAPSHOTTER: "true"
  # set to false to disable deployment of snapshotter container in RBD provisioner pod.
  CSI_ENABLE_RBD_SNAPSHOTTER: "true"
  # Enable cephfs kernel driver instead of ceph-fuse.
  # If you disable the kernel client, your application may be disrupted during upgrade.
  # See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
  # NOTE! cephfs quota is not supported in kernel version < 4.17
  CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_RBD_FSGROUPPOLICY: "File"
  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_CEPHFS_FSGROUPPOLICY: "File"
  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_NFS_FSGROUPPOLICY: "File"
  # (Optional) Allow starting unsupported ceph-csi image
  ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
  # (Optional) control the host mount of /etc/selinux for csi plugin pods.
  CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false"
  # The default version of CSI supported by Rook will be started. To change the version
  # of the CSI driver to something other than what is officially supported, change
  # these images to the desired release of the CSI driver.
  # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.10.1"
  # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.1"
  # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.9.2"
  # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v3.6.2"
  # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v6.3.2"
  # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.4.2"
  # To indicate the image pull policy to be applied to all the containers in the csi driver pods.
  # ROOK_CSI_IMAGE_PULL_POLICY: "IfNotPresent"
  # (Optional) set user created priorityclassName for csi plugin pods.
  CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
  # (Optional) set user created priorityclassName for csi provisioner pods.
  CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
  # Default value is 1.
  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: "1"
  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
  # Default value is 1.
  # CSI_RBD_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: "1"
  # CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_NFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
  # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
  # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
  # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
  # ROOK_CSI_NFS_POD_LABELS: "key1=value1,key2=value2"
  # (Optional) CephCSI CephFS plugin Volumes
  # CSI_CEPHFS_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix
  # (Optional) CephCSI CephFS plugin Volume mounts
  # CSI_CEPHFS_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true
  # (Optional) CephCSI RBD plugin Volumes
  # CSI_RBD_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix
  # (Optional) CephCSI RBD plugin Volume mounts
  # CSI_RBD_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true
  # (Optional) CephCSI provisioner NodeAffinity (applied to both CephFS and RBD provisioner).
  # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
  # (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_PROVISIONER_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) CephCSI plugin NodeAffinity (applied to both CephFS and RBD plugin).
  # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
  # (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_PLUGIN_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_RBD_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists
  # (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_RBD_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists
  # (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
  # (Optional) CephCSI CephFS provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists
  # (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
  # NOTE: Support for defining NodeAffinity for operators other than "In" and "Exists" requires the user to input a
  # valid v1.NodeAffinity JSON or YAML string. For example, the following is valid YAML v1.NodeAffinity:
  # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: |
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: myKey
  #             operator: DoesNotExist
  # (Optional) CephCSI CephFS plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_CEPHFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists
  # (Optional) CephCSI NFS provisioner NodeAffinity (overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_NFS_PROVISIONER_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS provisioner tolerations list (overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_NFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists
  # (Optional) CephCSI NFS plugin NodeAffinity (overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_NFS_PLUGIN_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS plugin tolerations list (overrides CSI_PLUGIN_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_NFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists
  # (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource
  # requests and limits you want to apply for provisioner pod
  #CSI_RBD_PROVISIONER_RESOURCE: |
  #  - name : csi-provisioner
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-resizer
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-attacher
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-snapshotter
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-rbdplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : csi-omap-generator
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  # (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource
  # requests and limits you want to apply for plugin pod
  #CSI_RBD_PLUGIN_RESOURCE: |
  #  - name : driver-registrar
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  #  - name : csi-rbdplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  # (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource
  # requests and limits you want to apply for provisioner pod
  #CSI_CEPHFS_PROVISIONER_RESOURCE: |
  #  - name : csi-provisioner
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-resizer
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-attacher
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-snapshotter
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-cephfsplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  # (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource
  # requests and limits you want to apply for plugin pod
  #CSI_CEPHFS_PLUGIN_RESOURCE: |
  #  - name : driver-registrar
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  #  - name : csi-cephfsplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  # (Optional) CEPH CSI NFS provisioner resource requirement list, Put here list of resource
  # requests and limits you want to apply for provisioner pod
  # CSI_NFS_PROVISIONER_RESOURCE: |
  #   - name : csi-provisioner
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name : csi-nfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  #   - name : csi-attacher
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  # (Optional) CEPH CSI NFS plugin resource requirement list, Put here list of resource
  # requests and limits you want to apply for plugin pod
  # CSI_NFS_PLUGIN_RESOURCE: |
  #   - name : driver-registrar
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  #   - name : csi-nfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  # Configure CSI CephFS liveness metrics port
  # Set to true to enable Ceph CSI liveness container.
  CSI_ENABLE_LIVENESS: "false"
  # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
  # Configure CSI RBD liveness metrics port
  # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
  # CSIADDONS_PORT: "9070"
  # Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options
  # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
  # CSI_CEPHFS_KERNEL_MOUNT_OPTIONS: "ms_mode=secure"
  # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
  # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
  # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
  ROOK_ENABLE_DISCOVERY_DAEMON: "false"
  # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15.
  ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
  # Enable the csi addons sidecar.
  CSI_ENABLE_CSIADDONS: "false"
  # Enable watch for faster recovery from rbd rwo node loss
  ROOK_WATCH_FOR_NODE_FAILURE: "true"
  # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
  # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
  CSI_GRPC_TIMEOUT_SECONDS: "150"
  # Enable topology based provisioning.
  CSI_ENABLE_TOPOLOGY: "false"
  # Domain labels define which node labels to use as domains
  # for CSI nodeplugins to advertise their domains
  # NOTE: the value here serves as an example and needs to be
  # updated with node labels that define domains of interest
  # CSI_TOPOLOGY_DOMAIN_LABELS: "kubernetes.io/hostname,topology.kubernetes.io/zone,topology.rook.io/rack"
  # Enable read affinity for RBD volumes. Recommended to
  # set to true if running kernel 5.8 or newer.
  CSI_ENABLE_READ_AFFINITY: "false"
  # CRUSH location labels define which node labels to use
  # as CRUSH location. This should correspond to the values set in
  # the CRUSH map.
  # Defaults to all the labels mentioned in
  # https://rook.io/docs/rook/latest/CRDs/Cluster/ceph-cluster-crd/#osd-topology
  # CSI_CRUSH_LOCATION_LABELS: "kubernetes.io/hostname,topology.kubernetes.io/zone,topology.rook.io/rack"
  # Whether to skip any attach operation altogether for CephCSI PVCs.
  # See more details [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If set to false it skips the volume attachments and makes the creation of pods using the CephCSI PVC fast.
  # **WARNING** It's highly discouraged to use this for RWO volumes. for RBD PVC it can cause data corruption,
  # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set to false
  # since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  CSI_CEPHFS_ATTACH_REQUIRED: "true"
  CSI_RBD_ATTACH_REQUIRED: "true"
  CSI_NFS_ATTACH_REQUIRED: "true"
  # Rook Discover toleration. Will tolerate all taints with all keys.
  # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
  # DISCOVER_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) Rook Discover priority class name to set on the pod(s)
  # DISCOVER_PRIORITY_CLASS_NAME: "<PriorityClassName>"
  # (Optional) Discover Agent NodeAffinity.
  # DISCOVER_AGENT_NODE_AFFINITY: |
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: myKey
  #             operator: DoesNotExist
  # (Optional) Discover Agent Pod Labels.
  # DISCOVER_AGENT_POD_LABELS: "key1=value1,key2=value2"
  # Disable automatic orchestration when new devices are discovered
  ROOK_DISABLE_DEVICE_HOTPLUG: "false"
  # The duration between discovering devices in the rook-discover daemonset.
  ROOK_DISCOVER_DEVICES_INTERVAL: "60m"
  # DISCOVER_DAEMON_RESOURCES: |
  #   - name: DISCOVER_DAEMON_RESOURCES
  #     resources:
  #       limits:
  #         cpu: 500m
  #         memory: 512Mi
  #       requests:
  #         cpu: 100m
  #         memory: 128Mi
---
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph # namespace:operator
  labels:
    operator: rook
    storage-backend: ceph
    app.kubernetes.io/name: rook-ceph
    app.kubernetes.io/instance: rook-ceph
    app.kubernetes.io/component: rook-ceph-operator
    app.kubernetes.io/part-of: rook-ceph-operator
spec:
  selector:
    matchLabels:
      app: rook-ceph-operator
  strategy:
    type: Recreate
  replicas: 1
  template:
    metadata:
      labels:
        app: rook-ceph-operator
    spec:
      tolerations:
        - effect: NoExecute
          key: node.kubernetes.io/unreachable
          operator: Exists
          tolerationSeconds: 5
      serviceAccountName: rook-ceph-system
      containers:
        - name: rook-ceph-operator
          image: rook/ceph:v1.13.1
          args: ["ceph", "operator"]
          securityContext:
            runAsNonRoot: true
            runAsUser: 2016
            runAsGroup: 2016
            capabilities:
              drop: ["ALL"]
          volumeMounts:
            - mountPath: /var/lib/rook
              name: rook-config
            - mountPath: /etc/ceph
              name: default-config-dir
          env:
            # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
            # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
            - name: ROOK_CURRENT_NAMESPACE_ONLY
              value: "false"
            # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
            # Set this to true if SELinux is enabled (e.g. OpenShift) to workaround the anyuid issues.
            # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
            - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
              value: "false"
            # Provide customised regex as the values using comma. For eg. regex for rbd based volume, value will be like "(?i)rbd[0-9]+".
            # In case of more than one regex, use comma to separate between them.
            # Default regex will be "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
            # Add regex expression after putting a comma to blacklist a disk
            # If value is empty, the default regex will be used.
            - name: DISCOVER_DAEMON_UDEV_BLACKLIST
              value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
            # Time to wait until the node controller will move Rook pods to other
            # nodes after detecting an unreachable node.
            # Pods affected by this setting are:
            # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
            # The value used in this variable replaces the default value of 300 secs
            # added automatically by k8s as Toleration for
            # <node.kubernetes.io/unreachable>
            # The total amount of time to reschedule Rook pods in healthy nodes
            # before detecting a <not ready node> condition will be the sum of:
            #  --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
            #  --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
            - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
              value: "5"
            # The name of the node to pass with the downward API
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The pod name to pass with the downward API
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # The pod namespace to pass with the downward API
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          # Recommended resource requests and limits, if desired
          #resources:
          #  limits:
          #    cpu: 500m
          #    memory: 512Mi
          #  requests:
          #    cpu: 100m
          #    memory: 128Mi
          # Uncomment it to run lib bucket provisioner in multithreaded mode
          #- name: LIB_BUCKET_PROVISIONER_THREADS
          #  value: "5"
      # Uncomment it to run rook operator on the host network
      #hostNetwork: true
      volumes:
        - name: rook-config
          emptyDir: {}
        - name: default-config-dir
          emptyDir: {}
# OLM: END OPERATOR DEPLOYMENT
|