#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production cluster on top of bare
# metal. This example expects three nodes, each with two available disks. Please modify it according to your
# environment. See the documentation for more details on storage settings available.
#
# For example, to create the cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster-on-local-pvc.yaml
#################################################################################################################
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: ceph-local0-0
spec:
  storageClassName: local
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for mon must be a filesystem volume.
  volumeMode: Filesystem
  local:
    # If you want to use dm devices like logical volumes, use device names
    # like /dev/vg-name/lv-name instead of a directory path.
    path: /data/ceph/ceph-0
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8smaster
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: ceph-local0-1
spec:
  storageClassName: local
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a block volume.
  # NOTE(review): a Block-mode local PV must point at a block device; this
  # path looks like a directory — confirm it is a device node (or loop/LV).
  volumeMode: Block
  local:
    path: /data/ceph/ceph-1
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8smaster
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: ceph-local1-0
spec:
  storageClassName: local
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for mon must be a filesystem volume.
  volumeMode: Filesystem
  local:
    path: /data/ceph/ceph-0
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node01
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: ceph-local1-1
spec:
  storageClassName: local
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a block volume.
  volumeMode: Block
  local:
    path: /data/ceph/ceph-1
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node01
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: ceph-local2-0
spec:
  storageClassName: local
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for mon must be a filesystem volume.
  volumeMode: Filesystem
  local:
    path: /data/ceph/ceph-0
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node02
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: ceph-local2-1
spec:
  storageClassName: local
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # PV for OSD must be a block volume.
  volumeMode: Block
  local:
    path: /data/ceph/ceph-1
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node02
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph # namespace:cluster
spec:
  dataDirHostPath: /var/lib/rook
  mon:
    # Three mons for quorum; one per node (allowMultiplePerNode: false).
    # Each mon claims one of the Filesystem-mode local PVs defined above.
    count: 3
    allowMultiplePerNode: false
    volumeClaimTemplate:
      spec:
        storageClassName: local
        resources:
          requests:
            storage: 10Gi
  cephVersion:
    image: "quay.io/ceph/ceph:v18.2.1"
    allowUnsupported: false
  skipUpgradeChecks: false
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  mgr:
    count: 1
    modules:
      - name: pg_autoscaler
        enabled: true
  dashboard:
    enabled: true
    ssl: true
  crashCollector:
    disable: false
  storage:
    storageClassDeviceSets:
      - name: ceph-storage
        # One OSD per node, consuming the Block-mode local PVs above.
        count: 3
        portable: false
        tuneDeviceClass: true
        tuneFastDeviceClass: false
        encrypted: false
        placement:
          topologySpreadConstraints:
            - maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: ScheduleAnyway
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-osd
                      - rook-ceph-osd-prepare
        preparePlacement:
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 100
                podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                      - key: app
                        operator: In
                        values:
                          - rook-ceph-osd
                      - key: app
                        operator: In
                        values:
                          - rook-ceph-osd-prepare
                  topologyKey: kubernetes.io/hostname
        # These are the OSD daemon limits. For OSD prepare limits, see the
        # separate "prepareosd" section under spec.resources below.
        # resources:
        #   limits:
        #     cpu: "500m"
        #     memory: "4Gi"
        #   requests:
        #     cpu: "500m"
        #     memory: "4Gi"
        volumeClaimTemplates:
          - metadata:
              # NOTE(review): Rook examples name the OSD data template "data"
              # (with optional "metadata"/"wal" templates) — confirm that a
              # custom name like "ceph-sys" is accepted by your Rook version.
              name: ceph-sys
              # To give your OSD a different CRUSH device class than the one
              # detected by Ceph, uncomment:
              # annotations:
              #   crushDeviceClass: hybrid
            spec:
              resources:
                requests:
                  storage: 10Gi
              # IMPORTANT: Change the storage class depending on your environment
              storageClassName: local
              volumeMode: Block
              accessModes:
                - ReadWriteOnce
    # When onlyApplyOSDPlacement is false, placement.All() and
    # storageClassDeviceSets.placement are merged.
    onlyApplyOSDPlacement: false
  # resources:
  #   prepareosd:
  #     limits:
  #       cpu: "200m"
  #       memory: "200Mi"
  #     requests:
  #       cpu: "200m"
  #       memory: "200Mi"
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0