/*
Copyright 2016 The Rook Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/rook/rook/pkg/daemon/ceph/client"
	opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
	"github.com/rook/rook/tests/framework/clients"
	"github.com/rook/rook/tests/framework/installer"
	"github.com/rook/rook/tests/framework/utils"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ************************************************
// *** Major scenarios tested by the SmokeSuite ***
// Setup
// - via the cluster CRD
// Monitors
// - Three mons in the cluster
// - Failover of an unhealthy monitor
// OSDs
// - Bluestore running on devices
// Block
// - Mount/unmount a block device through the dynamic provisioner
// - Fencing of the block device
// - Read/write to the device
// File system
// - Create the file system via the CRD
// - Mount/unmount a file system in a pod
// - Read/write to the file system
// - Delete the file system
// Object
// - Create the object store via the CRD
// - Create/delete buckets
// - Create/delete users
// - PUT/GET objects
// - Quota limit on the number of objects
// ************************************************

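// TestCephSmokeSuite is the test entry point. The deferred HandlePanics call
// ensures TearDownSuite still runs (so the cluster is uninstalled) even if a
// test panics mid-run. Assuming the standard Go tooling, the suite can be run
// on its own with, e.g., `go test -run TestCephSmokeSuite ./tests/integration/...`.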
func TestCephSmokeSuite(t *testing.T) {
	s := new(SmokeSuite)
	defer func(s *SmokeSuite) {
		HandlePanics(recover(), s.TearDownSuite, s.T)
	}(s)
	suite.Run(t, s)
}

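// SmokeSuite holds the state shared by all smoke tests: the storage test
// client, the cluster settings, the installer that manages the cluster
// lifecycle, and a helper for direct Kubernetes API access.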
type SmokeSuite struct {
	suite.Suite
	helper    *clients.TestClient
	settings  *installer.TestCephSettings
	installer *installer.CephInstaller
	k8sh      *utils.K8sHelper
}

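// SetupSuite runs once before any tests and brings up the operator and a
// three-mon Ceph cluster with the features under test enabled (encrypted and
// compressed connections, the crash pruner, volume replication, and NFS CSI).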
func (s *SmokeSuite) SetupSuite() {
	namespace := "smoke-ns"
	s.settings = &installer.TestCephSettings{
		ClusterName:             "smoke-cluster",
		Namespace:               namespace,
		OperatorNamespace:       installer.SystemNamespace(namespace),
		StorageClassName:        installer.StorageClassName(),
		UseHelm:                 false,
		UsePVC:                  installer.UsePVC(),
		Mons:                    3,
		SkipOSDCreation:         false,
		ConnectionsEncrypted:    true,
		ConnectionsCompressed:   true,
		UseCrashPruner:          true,
		EnableVolumeReplication: true,
		TestNFSCSI:              true,
		ChangeHostName:          true,
		RookVersion:             installer.LocalBuildTag,
		CephVersion:             installer.ReturnCephVersion(),
	}
	s.settings.ApplyEnvVars()
	s.installer, s.k8sh = StartTestCluster(s.T, s.settings)
	s.helper = clients.CreateTestClient(s.k8sh, s.installer.Manifests)
}

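// AfterTest collects the operator log after every test so that failures can be
// correlated with what the operator was doing at the time.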
func (s *SmokeSuite) AfterTest(suiteName, testName string) {
	s.installer.CollectOperatorLog(suiteName, testName)
}

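// TearDownSuite uninstalls Rook after the whole suite has finished.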
func (s *SmokeSuite) TearDownSuite() {
	s.installer.UninstallRook()
}

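// TestCephNFS_SmokeTest runs the end-to-end NFS test against the cluster.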
func (s *SmokeSuite) TestCephNFS_SmokeTest() {
	runNFSFileE2ETest(s.helper, s.k8sh, &s.Suite, s.settings, "smoke-test-nfs")
}

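// TestBlockStorage_SmokeTest exercises block storage through the CSI driver.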
func (s *SmokeSuite) TestBlockStorage_SmokeTest() {
	runBlockCSITest(s.helper, s.k8sh, &s.Suite, s.settings.Namespace)
}

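// TestFileStorage_SmokeTest runs the end-to-end file system test, preserving
// the filesystem on delete.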
func (s *SmokeSuite) TestFileStorage_SmokeTest() {
	preserveFilesystemOnDelete := true
	runFileE2ETest(s.helper, s.k8sh, &s.Suite, s.settings, "smoke-test-fs", preserveFilesystemOnDelete)
}

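// TestObjectStorage_SmokeTest runs the lightweight object store test against a
// store named "lite-store" without TLS, deleting the store afterward. Object
// tests are skipped on OpenShift.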
func (s *SmokeSuite) TestObjectStorage_SmokeTest() {
	if utils.IsPlatformOpenShift() {
		s.T().Skip("object store tests skipped on OpenShift")
	}
	storeName := "lite-store"
	deleteStore := true
	tls := false
	runObjectE2ETestLite(s.T(), s.helper, s.k8sh, s.installer, s.settings.Namespace, storeName, 2, deleteStore, tls)
}

// Test to make sure all Rook components are installed and running.
func (s *SmokeSuite) TestARookClusterInstallation_SmokeTest() {
	checkIfRookClusterIsInstalled(&s.Suite, s.k8sh, s.settings.OperatorNamespace, s.settings.Namespace, 3)
}

// Smoke test for mon failover: scale a mon deployment down to zero, then wait
// for the operator's health check either to start a replacement mon or to
// bring the original one back.
func (s *SmokeSuite) TestMonFailover() {
	ctx := context.TODO()
	logger.Infof("Mon Failover Smoke Test")

	deployments, err := s.getNonCanaryMonDeployments()
	require.NoError(s.T(), err)
	require.Equal(s.T(), 3, len(deployments))

	// Scale down a mon so the operator won't trigger a reconcile
	monToKill := deployments[0].Name
	logger.Infof("Scaling down mon %s", monToKill)
	scale, err := s.k8sh.Clientset.AppsV1().Deployments(s.settings.Namespace).GetScale(ctx, monToKill, metav1.GetOptions{})
	assert.NoError(s.T(), err)
	scale.Spec.Replicas = 0
	_, err = s.k8sh.Clientset.AppsV1().Deployments(s.settings.Namespace).UpdateScale(ctx, monToKill, scale, metav1.UpdateOptions{})
	assert.NoError(s.T(), err)

	// Wait for the health check to start a new monitor
	for i := 0; i < 30; i++ {
		deployments, err := s.getNonCanaryMonDeployments()
		require.NoError(s.T(), err)

		var currentMons []string
		var originalMonDeployment *appsv1.Deployment
		for j, mon := range deployments {
			currentMons = append(currentMons, mon.Name)
			if mon.Name == monToKill {
				originalMonDeployment = &deployments[j]
			}
		}
		logger.Infof("mon deployments: %v", currentMons)

		// Check if the original mon was scaled up again.
		// Depending on the state of the orchestration, the operator might trigger
		// re-creation of the deleted mon. In this case, consider the test successful
		// rather than waiting for a failover that will never occur.
		if originalMonDeployment != nil && *originalMonDeployment.Spec.Replicas > 0 {
			logger.Infof("Original mon created again, no need to wait for mon failover")
			return
		}

		if len(deployments) == 3 && originalMonDeployment == nil {
			logger.Infof("Found a new monitor!")
			return
		}

		logger.Infof("Waiting for a new monitor to start and the previous one to be deleted")
		time.Sleep(5 * time.Second)
	}
	require.Fail(s.T(), "giving up waiting for a new monitor")
}

// Smoke test for pool resizing: create a pool with size 1, update it to size 2,
// and verify the change is reflected in the Ceph pool details.
func (s *SmokeSuite) TestPoolResize() {
	ctx := context.TODO()
	logger.Infof("Pool Resize Smoke Test")

	poolName := "testpool"
	err := s.helper.PoolClient.Create(poolName, s.settings.Namespace, 1)
	require.NoError(s.T(), err)

	poolFound := false
	clusterInfo := client.AdminTestClusterInfo(s.settings.Namespace)

	// Wait for the pool to appear
	for i := 0; i < 10; i++ {
		pools, err := s.helper.PoolClient.ListCephPools(clusterInfo)
		require.NoError(s.T(), err)
		for _, p := range pools {
			if p.Name != poolName {
				continue
			}
			poolFound = true
		}
		if poolFound {
			break
		}
		logger.Infof("Waiting for pool to appear")
		time.Sleep(2 * time.Second)
	}
	require.Equal(s.T(), true, poolFound, "pool not found")

	err = s.helper.PoolClient.Update(poolName, s.settings.Namespace, 2)
	require.NoError(s.T(), err)

	poolResized := false
	// Wait for the pool resize to happen
	for i := 0; i < 10; i++ {
		details, err := s.helper.PoolClient.GetCephPoolDetails(clusterInfo, poolName)
		require.NoError(s.T(), err)
		if details.Size > 1 {
			logger.Infof("pool %s size was updated", poolName)
			require.Equal(s.T(), 2, int(details.Size))
			poolResized = true
			// Resize the pool back to 1 to avoid hanging when there are not enough OSDs to satisfy rbd
			err = s.helper.PoolClient.Update(poolName, s.settings.Namespace, 1)
			require.NoError(s.T(), err)
		} else if poolResized && details.Size == 1 {
			logger.Infof("pool resized back to 1")
			break
		}
		logger.Debugf("pool %s size not updated yet. details: %+v", poolName, details)
		logger.Infof("Waiting for pool %s resize to happen", poolName)
		time.Sleep(2 * time.Second)
	}
	require.Equal(s.T(), true, poolResized, fmt.Sprintf("pool %s not resized", poolName))

	// Verify that the Kubernetes Secret with the bootstrap peer token has been created (mirroring only)
	pool, err := s.k8sh.RookClientset.CephV1().CephBlockPools(s.settings.Namespace).Get(ctx, poolName, metav1.GetOptions{})
	assert.NoError(s.T(), err)
	if pool.Spec.Mirroring.Enabled {
		secretName := pool.Status.Info[opcontroller.RBDMirrorBootstrapPeerSecretName]
		assert.NotEmpty(s.T(), secretName)
		// Now fetch the secret, which contains the bootstrap peer token
		secret, err := s.k8sh.Clientset.CoreV1().Secrets(s.settings.Namespace).Get(ctx, secretName, metav1.GetOptions{})
		require.NoError(s.T(), err)
		assert.NotEmpty(s.T(), secret.Data["token"])
	}

	// Clean up the pool
	err = s.helper.PoolClient.DeletePool(s.helper.BlockClient, clusterInfo, poolName)
	assert.NoError(s.T(), err)
}

// Smoke test for the Client CRD: create a client, wait for it to appear,
// update its caps, and delete it.
func (s *SmokeSuite) TestCreateClient() {
	logger.Infof("Create Client Smoke Test")

	clientName := "client1"
	caps := map[string]string{
		"mon": "allow rwx",
		"mgr": "allow rwx",
		"osd": "allow rwx",
	}
	clusterInfo := client.AdminTestClusterInfo(s.settings.Namespace)
	err := s.helper.UserClient.Create(clientName, s.settings.Namespace, caps)
	require.NoError(s.T(), err)

	clientFound := false
	for i := 0; i < 30; i++ {
		clients, _ := s.helper.UserClient.Get(clusterInfo, "client."+clientName)
		if clients != "" {
			clientFound = true
		}
		if clientFound {
			break
		}
		logger.Infof("Waiting for client to appear")
		time.Sleep(2 * time.Second)
	}
	assert.Equal(s.T(), true, clientFound, "client not found")

	logger.Infof("Update Client Smoke Test")
	newCaps := map[string]string{
		"mon": "allow r",
		"mgr": "allow rw",
		"osd": "allow *",
	}
	caps, _ = s.helper.UserClient.Update(clusterInfo, clientName, newCaps)
	assert.Equal(s.T(), "allow r", caps["mon"], "wrong caps")
	assert.Equal(s.T(), "allow rw", caps["mgr"], "wrong caps")
	assert.Equal(s.T(), "allow *", caps["osd"], "wrong caps")

	err = s.helper.UserClient.Delete(clientName, s.settings.Namespace)
	require.NoError(s.T(), err)
}

// Smoke Test for RBD Mirror CRD
func (s *SmokeSuite) TestCreateRBDMirrorClient() {
	logger.Infof("Create rbd-mirror Smoke Test")

	rbdMirrorName := "my-rbd-mirror"
	err := s.helper.RBDMirrorClient.Create(s.settings.Namespace, rbdMirrorName, 1)
	require.NoError(s.T(), err)

	err = s.helper.RBDMirrorClient.Delete(s.settings.Namespace, rbdMirrorName)
	require.NoError(s.T(), err)
}

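// getNonCanaryMonDeployments lists the mon deployments in the cluster
// namespace and filters out the transient "-canary" deployments that the
// operator creates while scheduling mons.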
func (s *SmokeSuite) getNonCanaryMonDeployments() ([]appsv1.Deployment, error) {
	ctx := context.TODO()
	opts := metav1.ListOptions{LabelSelector: "app=rook-ceph-mon"}
	deployments, err := s.k8sh.Clientset.AppsV1().Deployments(s.settings.Namespace).List(ctx, opts)
	if err != nil {
		return nil, err
	}
	nonCanaryMonDeployments := []appsv1.Deployment{}
	for _, deployment := range deployments.Items {
		if !strings.HasSuffix(deployment.GetName(), "-canary") {
			nonCanaryMonDeployments = append(nonCanaryMonDeployments, deployment)
		}
	}
	return nonCanaryMonDeployments, nil
}
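
// The three polling loops above (TestMonFailover, TestPoolResize, and
// TestCreateClient) all repeat the same wait-until-condition pattern inline.
// A minimal sketch of a shared helper is below; waitForCondition is a
// hypothetical name, not part of the original suite or the Rook test framework.
func waitForCondition(attempts int, interval time.Duration, check func() bool) bool {
	for i := 0; i < attempts; i++ {
		if check() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}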