#################################################################################################################
# The deployment for the rook operator
# Contains the common settings for most Kubernetes deployments.
# For example, to create the rook-ceph cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster.yaml
#
# Also see other operator sample files for variations of operator.yaml:
# - operator-openshift.yaml: Common settings for running in OpenShift
#################################################################################################################
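# To verify the operator once the manifests above are applied (the label matches
# the operator Deployment's pod template below):
#   kubectl -n rook-ceph get pod -l app=rook-ceph-operator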
# Rook Ceph Operator Config ConfigMap
# Use this ConfigMap to override Rook-Ceph Operator configurations.
# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
# Operator Deployment.
# To move a configuration from the Operator Deployment to this ConfigMap, add the config
# here, then remove it from the Deployment to eliminate any future confusion.
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-ceph-operator-config
  # should be in the namespace of the operator
  namespace: rook-ceph # namespace:operator
data:
  # The logging level for the operator: ERROR | WARNING | INFO | DEBUG
  ROOK_LOG_LEVEL: "INFO"
  # Allow using loop devices for OSDs in test clusters.
  ROOK_CEPH_ALLOW_LOOP_DEVICES: "false"
  # Enable the default version of the CSI CephFS driver. To start another version of the CSI driver, see image properties below.
  ROOK_CSI_ENABLE_CEPHFS: "true"
  # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
  ROOK_CSI_ENABLE_RBD: "true"
  # Enable the CSI NFS driver. To start another version of the CSI driver, see image properties below.
  ROOK_CSI_ENABLE_NFS: "false"
  # Set to true to enable Ceph CSI PVC encryption support.
  CSI_ENABLE_ENCRYPTION: "false"
  # Set to true to enable host networking for the CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster, or
  # where there is a significant drop in read/write performance.
  # CSI_ENABLE_HOST_NETWORK: "true"
  # Set to true to add volume metadata on CephFS subvolumes and RBD images.
  # Not all users may want volume/snapshot details as metadata on CephFS subvolumes and RBD images,
  # so the metadata is disabled by default.
  # CSI_ENABLE_METADATA: "true"
  # The cluster name identifier to set as metadata on CephFS subvolumes and RBD images. This is useful,
  # for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single Ceph cluster.
  # CSI_CLUSTER_NAME: "my-prod-cluster"
  # Set the logging level for the Ceph CSI containers maintained by Ceph CSI.
  # Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity.
  # CSI_LOG_LEVEL: "0"
  # Set the logging level for the Kubernetes-csi sidecar containers.
  # Supported values are 0 to 5: 0 for general useful logs (the default), 5 for trace-level verbosity.
  # CSI_SIDECAR_LOG_LEVEL: "0"
  # Set the number of replicas for the CSI provisioner deployments.
  CSI_PROVISIONER_REPLICAS: "2"
  # The OMAP generator generates the omap mapping between the PV name and the RBD image.
  # CSI_ENABLE_OMAP_GENERATOR must be enabled when the RBD mirroring feature is in use.
  # By default the OMAP generator sidecar is deployed with the CSI provisioner pod; to disable
  # it, set this to false.
  # CSI_ENABLE_OMAP_GENERATOR: "false"
  # Set to false to disable deployment of the snapshotter container in the CephFS provisioner pod.
  CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
  # Set to false to disable deployment of the snapshotter container in the NFS provisioner pod.
  CSI_ENABLE_NFS_SNAPSHOTTER: "true"
  # Set to false to disable deployment of the snapshotter container in the RBD provisioner pod.
  CSI_ENABLE_RBD_SNAPSHOTTER: "true"
  # Enable the CephFS kernel driver instead of ceph-fuse.
  # If you disable the kernel client, your application may be disrupted during upgrade.
  # See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
  # NOTE! CephFS quotas are not supported in kernel versions < 4.17.
  CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
  # (Optional) Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_RBD_FSGROUPPOLICY: "File"
  # (Optional) Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_CEPHFS_FSGROUPPOLICY: "File"
  # (Optional) Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_NFS_FSGROUPPOLICY: "File"
  # (Optional) Allow starting an unsupported ceph-csi image.
  ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
  # (Optional) Control the host mount of /etc/selinux for CSI plugin pods.
  CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false"
  # The default version of CSI supported by Rook will be started. To change the version
  # of the CSI driver to something other than what is officially supported, change
  # these images to the desired release of the CSI driver.
  # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.10.1"
  # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.1"
  # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.9.2"
  # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v3.6.2"
  # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v6.3.2"
  # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.4.2"
  # The image pull policy to apply to all the containers in the CSI driver pods.
  # ROOK_CSI_IMAGE_PULL_POLICY: "IfNotPresent"
  # (Optional) Set a user-created priorityClassName for the CSI plugin pods.
  CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
  # (Optional) Set a user-created priorityClassName for the CSI provisioner pods.
  CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
  # CSI CephFS plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # The maxUnavailable parameter of the CSI CephFS plugin daemonset update strategy.
  # Default value is 1.
  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: "1"
  # CSI RBD plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # The maxUnavailable parameter of the CSI RBD plugin daemonset update strategy.
  # Default value is 1.
  # CSI_RBD_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: "1"
  # CSI NFS plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_NFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # The kubelet directory path, if kubelet is configured to use a path other than /var/lib/kubelet.
  # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
  # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
  # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
  # ROOK_CSI_NFS_POD_LABELS: "key1=value1,key2=value2"
  # (Optional) CephCSI CephFS plugin Volumes
  # CSI_CEPHFS_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix
  # (Optional) CephCSI CephFS plugin Volume mounts
  # CSI_CEPHFS_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true
  # (Optional) CephCSI RBD plugin Volumes
  # CSI_RBD_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix
  # (Optional) CephCSI RBD plugin Volume mounts
  # CSI_RBD_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true
  # (Optional) CephCSI provisioner NodeAffinity (applied to both the CephFS and RBD provisioner).
  # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
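  # For reference, the shorthand above expands to roughly the following
  # v1.NodeAffinity (a sketch based on the In/Exists mapping noted further below):
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: role
  #             operator: In
  #             values: ["storage-node"]
  #           - key: storage
  #             operator: In
  #             values: ["rook", "ceph"]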
  # (Optional) CephCSI provisioner tolerations list (applied to both the CephFS and RBD provisioner).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
  # CSI_PROVISIONER_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) CephCSI plugin NodeAffinity (applied to both the CephFS and RBD plugin).
  # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
  # (Optional) CephCSI plugin tolerations list (applied to both the CephFS and RBD plugin).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_PLUGIN_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
  # CSI_RBD_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists
  # (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_RBD_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists
  # (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
  # (Optional) CephCSI CephFS provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
  # CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists
  # (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
  # NOTE: Support for defining NodeAffinity for operators other than "In" and "Exists" requires the user to input a
  # valid v1.NodeAffinity JSON or YAML string. For example, the following is valid YAML v1.NodeAffinity:
  # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: |
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: myKey
  #             operator: DoesNotExist
  # (Optional) CephCSI CephFS plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_CEPHFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists
  # (Optional) CephCSI NFS provisioner NodeAffinity (overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_NFS_PROVISIONER_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS provisioner tolerations list (overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
  # CSI_NFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists
  # (Optional) CephCSI NFS plugin NodeAffinity (overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_NFS_PLUGIN_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS plugin tolerations list (overrides CSI_PLUGIN_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_NFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists
  # (Optional) Ceph CSI RBD provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  # CSI_RBD_PROVISIONER_RESOURCE: |
  #   - name: csi-provisioner
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-resizer
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-attacher
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-snapshotter
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-rbdplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  #   - name: csi-omap-generator
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  # (Optional) Ceph CSI RBD plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  # CSI_RBD_PLUGIN_RESOURCE: |
  #   - name: driver-registrar
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  #   - name: csi-rbdplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  # (Optional) Ceph CSI CephFS provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  # CSI_CEPHFS_PROVISIONER_RESOURCE: |
  #   - name: csi-provisioner
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-resizer
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-attacher
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-snapshotter
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-cephfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  # (Optional) Ceph CSI CephFS plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  # CSI_CEPHFS_PLUGIN_RESOURCE: |
  #   - name: driver-registrar
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  #   - name: csi-cephfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  # (Optional) Ceph CSI NFS provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  # CSI_NFS_PROVISIONER_RESOURCE: |
  #   - name: csi-provisioner
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  #   - name: csi-nfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  #   - name: csi-attacher
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #         cpu: 200m
  # (Optional) Ceph CSI NFS plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  # CSI_NFS_PLUGIN_RESOURCE: |
  #   - name: driver-registrar
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #         cpu: 100m
  #   - name: csi-nfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #         cpu: 500m
  # Set to true to enable the Ceph CSI liveness container.
  CSI_ENABLE_LIVENESS: "false"
  # Configure the CSI CephFS liveness metrics port.
  # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
  # Configure the CSI RBD liveness metrics port.
  # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
  # Configure the CSI-Addons sidecar port.
  # CSIADDONS_PORT: "9070"
  # Set the CephFS kernel mount options to use: https://docs.ceph.com/en/latest/man/8/mount.ceph/#options
  # Set to "ms_mode=secure" when connections.encrypted is enabled in the CephCluster CR.
  # CSI_CEPHFS_KERNEL_MOUNT_OPTIONS: "ms_mode=secure"
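  # For reference, a minimal sketch of the corresponding CephCluster CR setting
  # that enables msgr2 encryption (field names per the CephCluster CRD):
  #   spec:
  #     network:
  #       connections:
  #         encryption:
  #           enabled: true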
  # Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used.
  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
  # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
  # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
  ROOK_ENABLE_DISCOVERY_DAEMON: "false"
  # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it defaults to 15.
  ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
  # Enable the csi addons sidecar.
  CSI_ENABLE_CSIADDONS: "false"
  # Enable watch for faster recovery from RBD RWO node loss.
  ROOK_WATCH_FOR_NODE_FAILURE: "true"
  # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
  # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it defaults to 150.
  CSI_GRPC_TIMEOUT_SECONDS: "150"
  # Enable topology-based provisioning.
  CSI_ENABLE_TOPOLOGY: "false"
  # Domain labels define which node labels to use as domains
  # for CSI nodeplugins to advertise their domains.
  # NOTE: the value here serves as an example and needs to be
  # updated with node labels that define domains of interest.
  # CSI_TOPOLOGY_DOMAIN_LABELS: "kubernetes.io/hostname,topology.kubernetes.io/zone,topology.rook.io/rack"
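  # For example, nodes can be labeled with the domains above before enabling
  # topology-based provisioning (node name and label values are hypothetical):
  #   kubectl label node <node-name> topology.kubernetes.io/zone=zone1 topology.rook.io/rack=rack1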
  # Enable read affinity for RBD volumes. Recommended to
  # set to true if running kernel 5.8 or newer.
  CSI_ENABLE_READ_AFFINITY: "false"
  # CRUSH location labels define which node labels to use
  # as the CRUSH location. This should correspond to the values set in
  # the CRUSH map.
  # Defaults to all the labels mentioned in
  # https://rook.io/docs/rook/latest/CRDs/Cluster/ceph-cluster-crd/#osd-topology
  # CSI_CRUSH_LOCATION_LABELS: "kubernetes.io/hostname,topology.kubernetes.io/zone,topology.rook.io/rack"
  # Whether to skip any attach operation altogether for CephCSI PVCs.
  # See more details [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If set to false, the volume attachments are skipped, which makes the creation of pods using CephCSI PVCs fast.
  # **WARNING** Using this for RWO volumes is highly discouraged: for RBD PVCs it can cause data corruption.
  # csi-addons operations like ReclaimSpace and PVC key rotation will also not be supported if set to false,
  # since there will be no VolumeAttachments to determine which node the PVC is mounted on.
  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  CSI_CEPHFS_ATTACH_REQUIRED: "true"
  CSI_RBD_ATTACH_REQUIRED: "true"
  CSI_NFS_ATTACH_REQUIRED: "true"
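  # For reference, these settings map to the attachRequired field of the
  # CSIDriver objects that Rook creates, e.g. for RBD (a sketch; the driver
  # name assumes the default operator namespace):
  #   apiVersion: storage.k8s.io/v1
  #   kind: CSIDriver
  #   metadata:
  #     name: rook-ceph.rbd.csi.ceph.com
  #   spec:
  #     attachRequired: false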
  # (Optional) Rook Discover tolerations list. Put here the list of taints you want to tolerate in YAML format.
  # DISCOVER_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) Rook Discover priority class name to set on the pod(s).
  # DISCOVER_PRIORITY_CLASS_NAME: "<PriorityClassName>"
  # (Optional) Discover Agent NodeAffinity.
  # DISCOVER_AGENT_NODE_AFFINITY: |
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: myKey
  #             operator: DoesNotExist
  # (Optional) Discover Agent Pod Labels.
  # DISCOVER_AGENT_POD_LABELS: "key1=value1,key2=value2"
  # Disable automatic orchestration when new devices are discovered.
  ROOK_DISABLE_DEVICE_HOTPLUG: "false"
  # The duration between discovering devices in the rook-discover daemonset.
  ROOK_DISCOVER_DEVICES_INTERVAL: "60m"
  # (Optional) Rook Discover daemon resource requests and limits.
  # DISCOVER_DAEMON_RESOURCES: |
  #   - name: DISCOVER_DAEMON_RESOURCES
  #     resources:
  #       limits:
  #         cpu: 500m
  #         memory: 512Mi
  #       requests:
  #         cpu: 100m
  #         memory: 128Mi
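  # The operator watches this ConfigMap, so most settings can be changed on a
  # live cluster; a minimal sketch (the key/value pair is just an example):
  #   kubectl -n rook-ceph patch configmap rook-ceph-operator-config --type merge -p '{"data":{"ROOK_LOG_LEVEL":"DEBUG"}}'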
---
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph # namespace:operator
  labels:
    operator: rook
    storage-backend: ceph
    app.kubernetes.io/name: rook-ceph
    app.kubernetes.io/instance: rook-ceph
    app.kubernetes.io/component: rook-ceph-operator
    app.kubernetes.io/part-of: rook-ceph-operator
spec:
  selector:
    matchLabels:
      app: rook-ceph-operator
  strategy:
    type: Recreate
  replicas: 1
  template:
    metadata:
      labels:
        app: rook-ceph-operator
    spec:
      tolerations:
        - effect: NoExecute
          key: node.kubernetes.io/unreachable
          operator: Exists
          tolerationSeconds: 5
      serviceAccountName: rook-ceph-system
      containers:
        - name: rook-ceph-operator
          image: rook/ceph:v1.13.1
          args: ["ceph", "operator"]
          securityContext:
            runAsNonRoot: true
            runAsUser: 2016
            runAsGroup: 2016
            capabilities:
              drop: ["ALL"]
          volumeMounts:
            - mountPath: /var/lib/rook
              name: rook-config
            - mountPath: /etc/ceph
              name: default-config-dir
          env:
            # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
            # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
            - name: ROOK_CURRENT_NAMESPACE_ONLY
              value: "false"
            # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
            # Set this to true if SELinux is enabled (e.g. OpenShift) to work around the anyuid issues.
            # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
            - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
              value: "false"
            # Provide customized regexes as comma-separated values; for example, the regex for RBD-based
            # volumes would be "(?i)rbd[0-9]+". In case of more than one regex, separate them with commas.
            # The default regex is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+".
            # Append a regex after a comma to blacklist an additional disk pattern.
            # If the value is empty, the default regex will be used.
            - name: DISCOVER_DAEMON_UDEV_BLACKLIST
              value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
            # Time to wait until the node controller moves Rook pods to other
            # nodes after detecting an unreachable node.
            # Pods affected by this setting are:
            # mgr, rbd, mds, rgw, nfs, PVC-based mons and osds, and the ceph toolbox.
            # The value of this variable replaces the default 300-second toleration
            # for <node.kubernetes.io/unreachable> that k8s adds automatically.
            # The total amount of time before Rook pods are rescheduled onto healthy
            # nodes after a <not ready node> condition is detected is the sum of:
            #   --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
            #   --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
            # i.e. about 45 seconds with these defaults.
            - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
              value: "5"
            # The name of the node to pass with the downward API
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The pod name to pass with the downward API
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # The pod namespace to pass with the downward API
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # Uncomment it to run the lib bucket provisioner in multithreaded mode
            #- name: LIB_BUCKET_PROVISIONER_THREADS
            #  value: "5"
          # Recommended resource requests and limits, if desired
          #resources:
          #  limits:
          #    cpu: 500m
          #    memory: 512Mi
          #  requests:
          #    cpu: 100m
          #    memory: 128Mi
      # Uncomment it to run the rook operator on the host network
      #hostNetwork: true
      volumes:
        - name: rook-config
          emptyDir: {}
        - name: default-config-dir
          emptyDir: {}
# OLM: END OPERATOR DEPLOYMENT