# storageclass.yaml
  1. apiVersion: ceph.rook.io/v1
  2. kind: CephBlockPool
  3. metadata:
  4. name: r3pool
  5. namespace: rook-ceph # namespace:cluster
  6. spec:
  7. failureDomain: host
  8. replicated:
  9. size: 3
  10. # Disallow setting pool with replica 1, this could lead to data loss without recovery.
  11. # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
  12. requireSafeReplicaSize: true
  13. # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
  14. # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
  15. #targetSizeRatio: .5
  16. ---
  17. apiVersion: storage.k8s.io/v1
  18. kind: StorageClass
  19. metadata:
  20. name: ceph
  21. # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
  22. provisioner: rook-ceph.rbd.csi.ceph.com
  23. parameters:
  24. # clusterID is the namespace where the rook cluster is running
  25. # If you change this namespace, also change the namespace below where the secret namespaces are defined
  26. clusterID: rook-ceph # namespace:cluster
  27. # If you want to use erasure coded pool with RBD, you need to create
  28. # two pools. one erasure coded and one replicated.
  29. # You need to specify the replicated pool here in the `pool` parameter, it is
  30. # used for the metadata of the images.
  31. # The erasure coded pool must be set as the `dataPool` parameter below.
  32. #dataPool: ec-data-pool
  33. pool: r3pool
  34. # (optional) mapOptions is a comma-separated list of map options.
  35. # For krbd options refer
  36. # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  37. # For nbd options refer
  38. # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  39. # mapOptions: lock_on_read,queue_depth=1024
  40. # (optional) unmapOptions is a comma-separated list of unmap options.
  41. # For krbd options refer
  42. # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  43. # For nbd options refer
  44. # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  45. # unmapOptions: force
  46. # (optional) Set it to true to encrypt each volume with encryption keys
  47. # from a key management system (KMS)
  48. # encrypted: "true"
  49. # (optional) Use external key management system (KMS) for encryption key by
  50. # specifying a unique ID matching a KMS ConfigMap. The ID is only used for
  51. # correlation to configmap entry.
  52. # encryptionKMSID: <kms-config-id>
  53. # RBD image format. Defaults to "2".
  54. imageFormat: "2"
  55. # RBD image features
  56. # Available for imageFormat: "2". Older releases of CSI RBD
  57. # support only the `layering` feature. The Linux kernel (KRBD) supports the
  58. # full complement of features as of 5.4
  59. # `layering` alone corresponds to Ceph's bitfield value of "2" ;
  60. # `layering` + `fast-diff` + `object-map` + `deep-flatten` + `exclusive-lock` together
  61. # correspond to Ceph's OR'd bitfield value of "63". Here we use
  62. # a symbolic, comma-separated format:
  63. # For 5.4 or later kernels:
  64. #imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
  65. # For 5.3 or earlier kernels:
  66. imageFeatures: layering
  67. # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  68. # in the same namespace as the cluster.
  69. csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  70. csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
  71. csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  72. csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
  73. csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  74. csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
  75. # Specify the filesystem type of the volume. If not specified, csi-provisioner
  76. # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
  77. # in hyperconverged settings where the volume is mounted on the same node as the osds.
  78. csi.storage.k8s.io/fstype: ext4
  79. # uncomment the following to use rbd-nbd as mounter on supported nodes
  80. # **IMPORTANT**: CephCSI v3.4.0 onwards a volume healer functionality is added to reattach
  81. # the PVC to application pod if nodeplugin pod restart.
  82. # Its still in Alpha support. Therefore, this option is not recommended for production use.
  83. #mounter: rbd-nbd
  84. allowVolumeExpansion: true
  85. reclaimPolicy: Retain
  86. ## defalut volumeBindingMode: Immediate
  87. #volumeBindingMode: WaitForFirstConsumer