ceph_base_block_test.go

/*
Copyright 2016 The Rook Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration

import (
    "context"
    "fmt"
    "strconv"
    "strings"
    "time"

    "github.com/rook/rook/pkg/daemon/ceph/client"
    "github.com/rook/rook/pkg/operator/k8sutil"
    "github.com/rook/rook/tests/framework/clients"
    "github.com/rook/rook/tests/framework/installer"
    "github.com/rook/rook/tests/framework/utils"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
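// blockCSICloneTest creates a PVC and an app pod, writes data to the volume, clones the PVC,
// and verifies the clone by comparing md5 checksums of the parent and clone data before
// cleaning up both PVCs and their pods.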
func blockCSICloneTest(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, storageClassName string) {
    // create pvc and app
    pvcSize := "1Gi"
    pvcName := "parent-pvc"
    podName := "demo-pod"
    readOnly := false
    mountPoint := "/var/lib/test"
    logger.Infof("create a PVC")
    err := helper.BlockClient.CreatePVC(defaultNamespace, pvcName, storageClassName, "ReadWriteOnce", pvcSize)
    require.NoError(s.T(), err)
    require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, pvcName), "Make sure PVC is Bound")

    logger.Infof("bind PVC to application")
    err = helper.BlockClient.CreatePod(podName, pvcName, defaultNamespace, mountPoint, readOnly)
    assert.NoError(s.T(), err)

    logger.Infof("check pod is in running state")
    require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure pod is in running state")
    logger.Infof("Storage Mounted successfully")

    // write data to the PVC and get the checksum value
    logger.Infof("write data to pvc")
    cmd := fmt.Sprintf("dd if=/dev/zero of=%s/file.out bs=1MB count=10 status=none conv=fsync && md5sum %s/file.out", mountPoint, mountPoint)
    resp, err := k8sh.RunCommandInPod(defaultNamespace, podName, cmd)
    require.NoError(s.T(), err)
    pvcChecksum := strings.Fields(resp)
    require.Equal(s.T(), len(pvcChecksum), 2)

    clonePVCName := "clone-pvc"
    logger.Infof("create a new pvc from the existing pvc")
    err = helper.BlockClient.CreatePVCClone(defaultNamespace, clonePVCName, pvcName, storageClassName, "ReadWriteOnce", pvcSize)
    require.NoError(s.T(), err)
    require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, clonePVCName), "Make sure PVC is Bound")

    clonePodName := "clone-pod"
    logger.Infof("bind PVC clone to application")
    err = helper.BlockClient.CreatePod(clonePodName, clonePVCName, defaultNamespace, mountPoint, readOnly)
    assert.NoError(s.T(), err)

    logger.Infof("check pod is in running state")
    require.True(s.T(), k8sh.IsPodRunning(clonePodName, defaultNamespace), "make sure pod is in running state")
    logger.Infof("Storage Mounted successfully")

    // get the checksum of the cloned data and validate it
    logger.Infof("check that the md5sum of the pvc data and the clone data match")
    cmd = fmt.Sprintf("md5sum %s/file.out", mountPoint)
    resp, err = k8sh.RunCommandInPod(defaultNamespace, clonePodName, cmd)
    require.NoError(s.T(), err)
    clonePVCChecksum := strings.Fields(resp)
    require.Equal(s.T(), len(clonePVCChecksum), 2)

    // compare the checksum values and verify they are equal
    assert.Equal(s.T(), clonePVCChecksum[0], pvcChecksum[0])

    // delete the clone PVC and app
    logger.Infof("delete clone pod")
    err = k8sh.DeletePod(k8sutil.DefaultNamespace, clonePodName)
    require.NoError(s.T(), err)
    logger.Infof("delete clone pvc")
    err = helper.BlockClient.DeletePVC(defaultNamespace, clonePVCName)
    assertNoErrorUnlessNotFound(s, err)
    assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, clonePVCName))

    // delete the parent PVC and app
    err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName)
    require.NoError(s.T(), err)
    logger.Infof("delete parent pvc")
    err = helper.BlockClient.DeletePVC(defaultNamespace, pvcName)
    assertNoErrorUnlessNotFound(s, err)
    assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, pvcName))
}
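// blockCSISnapshotTest installs the snapshot CRDs and controller, snapshots a PVC that has data
// written to it, restores the snapshot into a new PVC, and verifies the restored data by
// comparing md5 checksums before tearing everything down.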
func blockCSISnapshotTest(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, storageClassName, namespace string) {
    logger.Infof("install snapshot CRD")
    err := k8sh.CreateSnapshotCRD()
    require.NoError(s.T(), err)

    logger.Infof("install snapshot controller")
    err = k8sh.CreateSnapshotController()
    require.NoError(s.T(), err)

    // clean up the CRD and controller in a defer to make sure they are removed,
    // since the filesystem test also installs the CRD and controller.
    defer func() {
        logger.Infof("delete snapshot-controller")
        err = k8sh.DeleteSnapshotController()
        require.NoError(s.T(), err)
        logger.Infof("delete snapshot CRD")
        err = k8sh.DeleteSnapshotCRD()
        require.NoError(s.T(), err)
    }()

    logger.Infof("check snapshot controller is running")
    err = k8sh.WaitForSnapshotController(15)
    require.NoError(s.T(), err)

    // create snapshot class
    snapshotDeletePolicy := "Delete"
    snapshotClassName := "snapshot-testing"
    logger.Infof("create snapshotclass")
    err = helper.BlockClient.CreateSnapshotClass(snapshotClassName, snapshotDeletePolicy, namespace)
    require.NoError(s.T(), err)

    // create pvc and app
    pvcSize := "1Gi"
    pvcName := "snap-pvc"
    podName := "demo-pod"
    readOnly := false
    mountPoint := "/var/lib/test"
    logger.Infof("create a PVC")
    err = helper.BlockClient.CreatePVC(defaultNamespace, pvcName, storageClassName, "ReadWriteOnce", pvcSize)
    require.NoError(s.T(), err)
    require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, pvcName), "Make sure PVC is Bound")

    logger.Infof("bind PVC to application")
    err = helper.BlockClient.CreatePod(podName, pvcName, defaultNamespace, mountPoint, readOnly)
    assert.NoError(s.T(), err)

    logger.Infof("check pod is in running state")
    require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure pod is in running state")
    logger.Infof("Storage Mounted successfully")

    // write data to the PVC and get the checksum value
    logger.Infof("write data to pvc")
    cmd := fmt.Sprintf("dd if=/dev/zero of=%s/file.out bs=1MB count=10 status=none conv=fsync && md5sum %s/file.out", mountPoint, mountPoint)
    resp, err := k8sh.RunCommandInPod(defaultNamespace, podName, cmd)
    require.NoError(s.T(), err)
    pvcChecksum := strings.Fields(resp)
    require.Equal(s.T(), len(pvcChecksum), 2)

    // create a snapshot
    snapshotName := "rbd-pvc-snapshot"
    logger.Infof("create a snapshot from the pvc")
    err = helper.BlockClient.CreateSnapshot(snapshotName, pvcName, snapshotClassName, defaultNamespace)
    require.NoError(s.T(), err)

    restorePVCName := "restore-block-pvc"
    // check the snapshot is in the ready state
    ready, err := k8sh.CheckSnapshotISReadyToUse(snapshotName, defaultNamespace, 15)
    require.NoError(s.T(), err)
    require.True(s.T(), ready, "make sure snapshot is in ready state")

    // create a restore from the snapshot and bind it to an app
    logger.Infof("restore a new pvc from the snapshot")
    err = helper.BlockClient.CreatePVCRestore(defaultNamespace, restorePVCName, snapshotName, storageClassName, "ReadWriteOnce", pvcSize)
    require.NoError(s.T(), err)
    require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, restorePVCName), "Make sure PVC is Bound")

    restorePodName := "restore-pod"
    logger.Infof("bind PVC restore to application")
    err = helper.BlockClient.CreatePod(restorePodName, restorePVCName, defaultNamespace, mountPoint, readOnly)
    assert.NoError(s.T(), err)

    logger.Infof("check pod is in running state")
    require.True(s.T(), k8sh.IsPodRunning(restorePodName, defaultNamespace), "make sure pod is in running state")
    logger.Infof("Storage Mounted successfully")

    // get the checksum of the restored data and validate it
    logger.Infof("check that the md5sum of the pvc data and the restored data match")
    cmd = fmt.Sprintf("md5sum %s/file.out", mountPoint)
    resp, err = k8sh.RunCommandInPod(defaultNamespace, restorePodName, cmd)
    require.NoError(s.T(), err)
    restorePVCChecksum := strings.Fields(resp)
    require.Equal(s.T(), len(restorePVCChecksum), 2)

    // compare the checksum values and verify they are equal
    assert.Equal(s.T(), restorePVCChecksum[0], pvcChecksum[0])

    // delete the restore PVC and app
    logger.Infof("delete restore pod")
    err = k8sh.DeletePod(k8sutil.DefaultNamespace, restorePodName)
    require.NoError(s.T(), err)
    logger.Infof("delete restore pvc")
    err = helper.BlockClient.DeletePVC(defaultNamespace, restorePVCName)
    assertNoErrorUnlessNotFound(s, err)
    assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, restorePVCName))

    // delete the snapshot
    logger.Infof("delete snapshot")
    err = helper.BlockClient.DeleteSnapshot(snapshotName, pvcName, snapshotClassName, defaultNamespace)
    require.NoError(s.T(), err)

    // delete the parent PVC and app
    logger.Infof("delete application pod")
    err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName)
    require.NoError(s.T(), err)
    logger.Infof("delete parent pvc")
    err = helper.BlockClient.DeletePVC(defaultNamespace, pvcName)
    assertNoErrorUnlessNotFound(s, err)
    assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, pvcName))

    logger.Infof("delete snapshotclass")
    err = helper.BlockClient.DeleteSnapshotClass(snapshotClassName, snapshotDeletePolicy, namespace)
    require.NoError(s.T(), err)
}
// Smoke test for block storage - checks the following operations on block storage, in order:
// Create, Mount, Write, Read, Expand, Unmount, and Delete.
func runBlockCSITest(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, namespace string) {
    podName := "block-test"
    poolName := "replicapool"
    storageClassName := "rook-ceph-block"
    blockName := "block-pv-claim"
    podNameWithPVRetained := "block-test-retained"
    poolNameRetained := "replicapoolretained"
    storageClassNameRetained := "rook-ceph-block-retained"
    blockNameRetained := "block-pv-claim-retained"

    clusterInfo := client.AdminTestClusterInfo(namespace)
    defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolName, storageClassName, blockName, podName, true)
    defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolNameRetained, storageClassNameRetained, blockNameRetained, podNameWithPVRetained, true)
    logger.Infof("Block Storage End to End Integration Test - create, mount, write to, read from, and unmount")
    logger.Infof("Running on Rook Cluster %s", namespace)

    logger.Infof("step 0: Get the initial list of block images")
    initBlockImages, _ := helper.BlockClient.ListAllImages(clusterInfo)
    assert.Equal(s.T(), 0, len(initBlockImages), "there should not already be any images in the pool")

    logger.Infof("step 1: Create block storage")
    err := helper.BlockClient.CreatePoolAndStorageClass(defaultNamespace, poolName, storageClassName, "Delete")
    require.NoError(s.T(), err)
    err = helper.BlockClient.CreatePVC(defaultNamespace, blockName, storageClassName, "ReadWriteOnce", "1M")
    require.NoError(s.T(), err)
    require.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 1), "Make sure a new block is created")
    err = helper.BlockClient.CreatePoolAndStorageClass(defaultNamespace, poolNameRetained, storageClassNameRetained, "Retain")
    require.NoError(s.T(), err)
    err = helper.BlockClient.CreatePVC(defaultNamespace, blockNameRetained, storageClassNameRetained, "ReadWriteOnce", "1M")
    require.NoError(s.T(), err)
    require.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 2), "Make sure another new block is created")
    logger.Infof("Block Storage created successfully")
    require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, blockName), "Make sure PVC is Bound")
    require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, blockNameRetained), "Make sure PVC with reclaimPolicy:Retain is Bound")

    logger.Infof("step 2: Mount block storage")
    createPodWithBlock(helper, k8sh, s, namespace, storageClassName, podName, blockName)
    createPodWithBlock(helper, k8sh, s, namespace, storageClassName, podNameWithPVRetained, blockNameRetained)

    logger.Infof("step 3: Write to block storage")
    message := "Smoke Test Data for Block storage"
    filename := "bsFile1"
    err = k8sh.WriteToPod("", podName, filename, message)
    assert.NoError(s.T(), err)
    logger.Infof("Write to Block storage successful")

    logger.Infof("step 4: Read from block storage")
    err = k8sh.ReadFromPod("", podName, filename, message)
    assert.NoError(s.T(), err)
    logger.Infof("Read from Block storage successful")

    logger.Infof("step 5: Restart the OSDs to confirm they are still healthy after restart")
    restartOSDPods(k8sh, s, namespace)

    logger.Infof("step 6: Read from block storage again")
    err = k8sh.ReadFromPod("", podName, filename, message)
    assert.NoError(s.T(), err)
    logger.Infof("Read from Block storage successful")

    logger.Infof("step 7: Mount the same block storage on a different pod. Should not be allowed")
    otherPod := "block-test2"
    err = helper.BlockClient.CreateClientPod(getCSIBlockPodDefinition(otherPod, blockName, defaultNamespace, storageClassName, false))
    assert.NoError(s.T(), err)
    // ** FIX: WHY IS THE RWO VOLUME NOT BEING FENCED??? The second pod is starting successfully with the same PVC
    //require.True(s.T(), k8sh.IsPodInError(otherPod, defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure block-test2 pod errors out while mounting the volume")
    //logger.Infof("Block Storage successfully fenced")

    logger.Infof("step 8: Delete fenced pod")
    err = k8sh.DeletePod(k8sutil.DefaultNamespace, otherPod)
    require.NoError(s.T(), err)
    require.True(s.T(), k8sh.IsPodTerminated(otherPod, defaultNamespace), "make sure block-test2 pod is terminated")
    logger.Infof("Fenced pod deleted successfully")

    logger.Infof("step 9: Unmount block storage")
    err = k8sh.DeletePod(k8sutil.DefaultNamespace, podName)
    require.NoError(s.T(), err)
    err = k8sh.DeletePod(k8sutil.DefaultNamespace, podNameWithPVRetained)
    require.NoError(s.T(), err)
    require.True(s.T(), k8sh.IsPodTerminated(podName, defaultNamespace), "make sure block-test pod is terminated")
    require.True(s.T(), k8sh.IsPodTerminated(podNameWithPVRetained, defaultNamespace), "make sure block-test-retained pod is terminated")
    logger.Infof("Block Storage unmounted successfully")

    logger.Infof("step 10: Deleting block storage")
    deletePVC(helper, k8sh, s, clusterInfo, blockName, "Delete")
    deletePVC(helper, k8sh, s, clusterInfo, blockNameRetained, "Retain")

    logger.Infof("step 11: Delete storage classes and pools")
    err = helper.PoolClient.DeletePool(helper.BlockClient, clusterInfo, poolName)
    assert.NoError(s.T(), err)
    err = helper.PoolClient.DeletePool(helper.BlockClient, clusterInfo, poolNameRetained)
    assert.NoError(s.T(), err)
    err = helper.BlockClient.DeleteStorageClass(storageClassName)
    assert.NoError(s.T(), err)
    err = helper.BlockClient.DeleteStorageClass(storageClassNameRetained)
    assert.NoError(s.T(), err)
}
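// deletePVC deletes the given PVC and verifies that its PV honors the expected reclaim policy:
// with "Delete" the PV is removed while the block image count drops, and with "Retain" the PV
// is only released and must be deleted explicitly.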
func deletePVC(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, clusterInfo *client.ClusterInfo, pvcName, retainPolicy string) {
    pvName, err := k8sh.GetPVCVolumeName(defaultNamespace, pvcName)
    assert.NoError(s.T(), err)
    pv, err := k8sh.GetPV(pvName)
    require.NoError(s.T(), err)
    logger.Infof("deleting pvc %q", pvcName)
    err = helper.BlockClient.DeletePVC(defaultNamespace, pvcName)
    assert.NoError(s.T(), err)
    assert.Equal(s.T(), retainPolicy, string((*pv).Spec.PersistentVolumeReclaimPolicy))
    if retainPolicy == "Delete" {
        assert.True(s.T(), retryPVCheck(k8sh, pvName, false, ""))
        logger.Infof("PV: %s deleted successfully", pvName)
        assert.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 1), "Make sure a block is deleted")
        logger.Infof("Block Storage deleted successfully")
    } else {
        assert.True(s.T(), retryPVCheck(k8sh, pvName, true, "Released"))
        assert.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 1), "Make sure a block is retained")
        logger.Infof("Block Storage retained")
        _, err = k8sh.Kubectl("delete", "pv", pvName)
        assert.NoError(s.T(), err)
    }
}
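// createPodWithBlock starts a test pod that mounts the given PVC through the CSI block
// storage class and waits for it to reach the running state.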
func createPodWithBlock(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, clusterNamespace, storageClassName, podName, pvcName string) {
    err := helper.BlockClient.CreateClientPod(getCSIBlockPodDefinition(podName, pvcName, defaultNamespace, storageClassName, false))
    assert.NoError(s.T(), err)
    require.True(s.T(), k8sh.IsPodRunning(podName, defaultNamespace), "make sure block-test pod is in running state")
    logger.Infof("Block Storage Mounted successfully")
}
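// restartOSDPods deletes all OSD pods in the namespace, waits for them to terminate,
// and then waits for the replacement OSD pods to be running.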
func restartOSDPods(k8sh *utils.K8sHelper, s *suite.Suite, namespace string) {
    ctx := context.TODO()
    osdLabel := "app=rook-ceph-osd"

    // Delete the osd pod(s)
    logger.Infof("Deleting osd pod(s)")
    pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: osdLabel})
    assert.NoError(s.T(), err)
    for _, pod := range pods.Items {
        options := metav1.DeleteOptions{}
        err := k8sh.Clientset.CoreV1().Pods(namespace).Delete(ctx, pod.Name, options)
        assert.NoError(s.T(), err)
    }
    for _, pod := range pods.Items {
        logger.Infof("Waiting for osd pod %s to be deleted", pod.Name)
        deleted := k8sh.WaitUntilPodIsDeleted(pod.Name, namespace)
        assert.True(s.T(), deleted)
    }

    // Wait for the new pods to run
    logger.Infof("Waiting for new osd pod to run")
    err = k8sh.WaitForLabeledPodsToRun(osdLabel, namespace)
    assert.NoError(s.T(), err)
}
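// runBlockCSITestLite is a lighter-weight block test: it creates a pool, storage class, and PVC,
// then runs the CSI snapshot and clone tests against that storage class.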
func runBlockCSITestLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, settings *installer.TestCephSettings) {
    logger.Infof("Block Storage End to End Integration Test - create storageclass, pool and pvc")
    logger.Infof("Running on Rook Cluster %s", settings.Namespace)

    clusterInfo := client.AdminTestClusterInfo(settings.Namespace)
    poolName := "rookpool"
    storageClassName := "rook-ceph-block-lite"
    blockName := "test-block-claim-lite"
    podName := "test-pod-lite"
    defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolName, storageClassName, blockName, podName, true)
    setupBlockLite(helper, k8sh, s, clusterInfo, poolName, storageClassName, blockName)
    blockCSISnapshotTest(helper, k8sh, s, storageClassName, settings.Namespace)
    blockCSICloneTest(helper, k8sh, s, storageClassName)
}
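// setupBlockLite creates the pool, storage class, and an initial PVC, verifying that the pool
// starts out with no block images.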
func setupBlockLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, clusterInfo *client.ClusterInfo,
    poolName, storageClassName, blockName string) {
    // Check the initial number of block images
    initialBlocks, err := helper.BlockClient.ListAllImages(clusterInfo)
    require.NoError(s.T(), err)
    initBlockCount := len(initialBlocks)
    assert.Equal(s.T(), 0, initBlockCount, "why is there already a block image in the new pool?")

    logger.Infof("step: Create Pool, StorageClass and PVC")
    err = helper.BlockClient.CreatePoolAndStorageClass(defaultNamespace, poolName, storageClassName, "Delete")
    require.NoError(s.T(), err)
    createAndWaitForPVC(helper, k8sh, s, clusterInfo, storageClassName, blockName)
}
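// createAndWaitForPVC creates a PVC against the given storage class, waits for it to be bound,
// and verifies that exactly one block image exists afterwards.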
func createAndWaitForPVC(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, clusterInfo *client.ClusterInfo,
    storageClassName, blockName string) {
    err := helper.BlockClient.CreatePVC(defaultNamespace, blockName, storageClassName, "ReadWriteOnce", "1M")
    require.NoError(s.T(), err)
    require.True(s.T(), k8sh.WaitUntilPVCIsBound(defaultNamespace, blockName))

    // Make sure the new block image is created
    b, err := helper.BlockClient.ListAllImages(clusterInfo)
    assert.NoError(s.T(), err)
    assert.Equal(s.T(), 1, len(b), "Make sure new block image is created")
}
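// deleteBlockLite removes the PVC, pool, and storage class created by setupBlockLite and,
// if requested, verifies that all block images were removed.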
func deleteBlockLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, clusterInfo *client.ClusterInfo, poolName, storageClassName, blockName string, requireBlockImagesRemoved bool) {
    logger.Infof("deleteBlockLite: cleaning up after test")
    // Delete the pvc and storageclass
    err := helper.BlockClient.DeletePVC(defaultNamespace, blockName)
    assertNoErrorUnlessNotFound(s, err)
    assert.True(s.T(), k8sh.WaitUntilPVCIsDeleted(defaultNamespace, blockName))
    if requireBlockImagesRemoved {
        assert.NoError(s.T(), retryBlockImageCountCheck(helper, clusterInfo, 0), "Make sure block images were deleted")
    }
    err = helper.PoolClient.DeletePool(helper.BlockClient, clusterInfo, poolName)
    assertNoErrorUnlessNotFound(s, err)
    err = helper.BlockClient.DeleteStorageClass(storageClassName)
    assertNoErrorUnlessNotFound(s, err)
    checkPoolDeleted(helper, s, clusterInfo.Namespace, poolName)
}
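// assertNoErrorUnlessNotFound asserts that err is nil, but tolerates Kubernetes NotFound errors
// since the resource may already have been deleted by a previous cleanup step.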
func assertNoErrorUnlessNotFound(s *suite.Suite, err error) {
    if err == nil || errors.IsNotFound(err) {
        return
    }
    assert.NoError(s.T(), err)
}
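// checkPoolDeleted polls for the Ceph pool to disappear, logging an error (rather than failing
// the test) if it still exists after the retries are exhausted.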
func checkPoolDeleted(helper *clients.TestClient, s *suite.Suite, namespace, name string) {
    // retry a few times to see if the pool was deleted
    for i := 0; i < 3; i++ {
        found, err := helper.PoolClient.CephPoolExists(namespace, name)
        if err != nil {
            // try again on failure since the pool may have been in an unexpected state while deleting
            logger.Warningf("error getting pools. %+v", err)
        } else if !found {
            logger.Infof("pool %s is deleted", name)
            return
        }
        logger.Infof("pool %s still exists", name)
        time.Sleep(time.Second * utils.RetryInterval)
    }
    // this is logged rather than asserted in order to improve the reliability of the tests
    logger.Errorf("pool %s was not deleted", name)
}
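// blockTestDataCleanUp deletes the test pod (logging rather than failing on error) and then
// removes the PVC, pool, and storage class via deleteBlockLite.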
func blockTestDataCleanUp(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, clusterInfo *client.ClusterInfo, poolname, storageclassname, blockname, podName string, requireBlockImagesRemoved bool) {
    logger.Infof("Cleaning up block storage")
    err := k8sh.DeletePod(k8sutil.DefaultNamespace, podName)
    if err != nil {
        logger.Errorf("failed to delete pod. %v", err)
    }
    deleteBlockLite(helper, k8sh, s, clusterInfo, poolname, storageclassname, blockname, requireBlockImagesRemoved)
}
// retryBlockImageCountCheck periodically checks whether the block image count has reached the
// expected value. When creating a PVC in Kubernetes, it may take some time for the backing
// block image to be created and bound.
func retryBlockImageCountCheck(helper *clients.TestClient, clusterInfo *client.ClusterInfo, expectedImageCount int) error {
    for i := 0; i < utils.RetryLoop; i++ {
        blockImages, err := helper.BlockClient.ListAllImages(clusterInfo)
        if err != nil {
            return err
        }
        if expectedImageCount == len(blockImages) {
            return nil
        }
        logger.Infof("Waiting for block image count to reach %d. current=%d. %+v", expectedImageCount, len(blockImages), blockImages)
        time.Sleep(time.Second * utils.RetryInterval)
    }
    return fmt.Errorf("timed out waiting for image count to reach %d", expectedImageCount)
}
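// retryPVCheck polls the PV until it either no longer exists (exists=false) or reaches the
// given status phase (exists=true), returning false if neither happens within the retry limit.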
func retryPVCheck(k8sh *utils.K8sHelper, name string, exists bool, status string) bool {
    for i := 0; i < utils.RetryLoop; i++ {
        pv, err := k8sh.GetPV(name)
        if err != nil {
            if !exists {
                return true
            }
        } else if exists && string(pv.Status.Phase) == status {
            // only check the phase when the PV was fetched successfully to avoid a nil dereference
            return true
        }
        logger.Infof("Waiting for PV %q to have status %q with exists %t", name, status, exists)
        time.Sleep(time.Second * utils.RetryInterval)
    }
    return false
}
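// getCSIBlockPodDefinition returns a pod manifest for a busybox container that mounts the given
// PVC at utils.TestMountPath, touches a test file, and sleeps so the volume stays mounted while
// the test runs.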
func getCSIBlockPodDefinition(podName, pvcName, namespace, storageClass string, readOnly bool) string {
    return `
apiVersion: v1
kind: Pod
metadata:
  name: ` + podName + `
  namespace: ` + namespace + `
spec:
  containers:
  - name: ` + podName + `
    image: busybox
    command:
      - sh
      - "-c"
      - "touch ` + utils.TestMountPath + `/csi.test && sleep 3600"
    imagePullPolicy: IfNotPresent
    env:
    volumeMounts:
    - mountPath: ` + utils.TestMountPath + `
      name: csivol
  volumes:
  - name: csivol
    persistentVolumeClaim:
      claimName: ` + pvcName + `
      readOnly: ` + strconv.FormatBool(readOnly) + `
  restartPolicy: Never
`
}