ceph_upgrade_test.go 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460
  1. /*
  2. Copyright 2016 The Rook Authors. All rights reserved.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package integration
  14. import (
  15. "context"
  16. "fmt"
  17. "strings"
  18. "testing"
  19. "time"
  20. v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
  21. "github.com/rook/rook/pkg/daemon/ceph/client"
  22. "github.com/rook/rook/pkg/operator/k8sutil"
  23. "github.com/rook/rook/tests/framework/clients"
  24. "github.com/rook/rook/tests/framework/installer"
  25. "github.com/rook/rook/tests/framework/utils"
  26. "github.com/stretchr/testify/assert"
  27. "github.com/stretchr/testify/require"
  28. "github.com/stretchr/testify/suite"
  29. )
// Names and fixtures shared by all upgrade tests in this file.
const (
	// rbdPodName is the test pod that mounts the RBD-backed volume.
	rbdPodName = "test-pod-upgrade"
	// operatorContainer is the name of both the operator deployment and its container.
	operatorContainer = "rook-ceph-operator"
	// blockName is the PVC name used for the block storage test.
	blockName = "block-claim-upgrade"
	bucketPrefix = "generate-me" // use generated bucket name for this test
	// simpleTestMessage is the payload written to and read back from each mount.
	simpleTestMessage = "my simple message"
)
  37. // ************************************************
  38. // *** Major scenarios tested by the UpgradeSuite ***
  39. // Setup
  40. // - Initially create a cluster from the previous minor release
  41. // - Upgrade to the current build of Rook to verify functionality after upgrade
  42. // - Test basic usage of block, object, and file after upgrade
  43. // Monitors
  44. // - One mon in the cluster
  45. // ************************************************
  46. func TestCephUpgradeSuite(t *testing.T) {
  47. s := new(UpgradeSuite)
  48. defer func(s *UpgradeSuite) {
  49. HandlePanics(recover(), s.TearDownSuite, s.T)
  50. }(s)
  51. suite.Run(t, s)
  52. }
// UpgradeSuite tests upgrading a Rook cluster from the previous release to the
// current build, and upgrading Ceph between versions after that.
type UpgradeSuite struct {
	suite.Suite
	helper    *clients.TestClient         // storage clients used to exercise block/object/file
	k8sh      *utils.K8sHelper            // kubernetes helper for raw API/kubectl access
	settings  *installer.TestCephSettings // install settings; mutated as the upgrade progresses
	installer *installer.CephInstaller    // drives install/uninstall of Rook
	namespace string                      // namespace of the cluster under test
}
// SetupSuite is intentionally empty: each test calls baseSetup itself so it
// can pick the install method (helm vs. manifests) and the initial Ceph version.
func (s *UpgradeSuite) SetupSuite() {
	// All setup is in baseSetup()
}
// TearDownSuite removes the Rook installation after the whole suite finishes.
func (s *UpgradeSuite) TearDownSuite() {
	s.installer.UninstallRook()
}
// baseSetup installs a cluster from the previous Rook release (v1.12) with the
// given initial Ceph version, so the tests can then upgrade it to the current
// build. useHelm selects the helm-chart install path instead of manifests.
func (s *UpgradeSuite) baseSetup(useHelm bool, initialCephVersion v1.CephVersionSpec) {
	s.namespace = "upgrade"
	s.settings = &installer.TestCephSettings{
		ClusterName:                 s.namespace,
		Namespace:                   s.namespace,
		OperatorNamespace:           installer.SystemNamespace(s.namespace),
		UseHelm:                     useHelm,
		RetainHelmDefaultStorageCRs: true,
		UsePVC:                      false,
		Mons:                        1, // a single mon keeps the upgrade cycles fast
		EnableDiscovery:             true,
		SkipClusterCleanup:          true, // the cluster is reused across the upgrade steps
		RookVersion:                 installer.Version1_12, // start from the previous minor release
		CephVersion:                 initialCephVersion,
	}
	s.installer, s.k8sh = StartTestCluster(s.T, s.settings)
	s.helper = clients.CreateTestClient(s.k8sh, s.installer.Manifests)
}
// TestUpgradeRook runs the core upgrade scenario with a manifest-based install.
func (s *UpgradeSuite) TestUpgradeRook() {
	s.testUpgrade(false, installer.QuincyVersion)
}

// TestUpgradeHelm runs the core upgrade scenario with a helm-based install.
func (s *UpgradeSuite) TestUpgradeHelm() {
	s.testUpgrade(true, installer.QuincyVersion)
}
// testUpgrade runs the core upgrade scenario: deploy a v1.12 cluster with the
// given Ceph version, upgrade Rook to the current build (master), verify the
// daemons and the block/object/file clients still work, and then — for
// manifest-based installs only — upgrade Ceph from quincy to reef.
func (s *UpgradeSuite) testUpgrade(useHelm bool, initialCephVersion v1.CephVersionSpec) {
	s.baseSetup(useHelm, initialCephVersion)
	objectUserID := "upgraded-user"
	preFilename := "pre-upgrade-file"
	numOSDs, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectUserID, preFilename)
	clusterInfo := client.AdminTestClusterInfo(s.namespace)
	requireBlockImagesRemoved := false
	// Clean up every block/file/object artifact created before the upgrade.
	defer func() {
		blockTestDataCleanUp(s.helper, s.k8sh, &s.Suite, clusterInfo, installer.BlockPoolName, installer.BlockPoolSCName, blockName, rbdPodName, requireBlockImagesRemoved)
		cleanupFilesystemConsumer(s.helper, s.k8sh, &s.Suite, s.namespace, filePodName)
		cleanupFilesystem(s.helper, s.k8sh, &s.Suite, s.namespace, installer.FilesystemName)
		_ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID)
		_ = s.helper.BucketClient.DeleteObc(obcName, installer.ObjectStoreSCName, bucketPrefix, maxObject, false)
		_ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreSCName, "Delete")
		objectStoreCleanUp(&s.Suite, s.helper, s.k8sh, s.settings.Namespace, installer.ObjectStoreName)
	}()
	// Delete Object-SC before upgrade test (https://github.com/rook/rook/issues/10153)
	_ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreSCName, "Delete")
	//
	// Upgrade Rook from v1.12 to master
	//
	logger.Infof("*** UPGRADING ROOK FROM %s to master ***", installer.Version1_12)
	s.gatherLogs(s.settings.OperatorNamespace, "_before_master_upgrade")
	s.upgradeToMaster()
	s.verifyOperatorImage(installer.LocalBuildTag)
	s.verifyRookUpgrade(numOSDs)
	err := s.installer.WaitForToolbox(s.namespace)
	assert.NoError(s.T(), err)
	logger.Infof("Done with automatic upgrade from %s to master", installer.Version1_12)
	newFile := "post-upgrade-previous-to-master-file"
	// Files written before the upgrade must still be readable afterward.
	s.verifyFilesAfterUpgrade(newFile, rbdFilesToRead, cephfsFilesToRead)
	// Carry the new file forward so later upgrade steps verify it too.
	rbdFilesToRead = append(rbdFilesToRead, newFile)
	cephfsFilesToRead = append(cephfsFilesToRead, newFile)
	checkCephObjectUser(&s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, true, false)
	// should be Bound after upgrade to Rook master
	// do not need retry b/c the OBC controller runs parallel to Rook-Ceph orchestration
	assert.True(s.T(), s.helper.BucketClient.CheckOBC(obcName, "bound"))
	logger.Infof("Verified upgrade from %s to master", installer.Version1_12)
	// SKIP the Ceph version upgrades for the helm test
	if s.settings.UseHelm {
		return
	}
	//
	// Upgrade from quincy to reef
	//
	logger.Infof("*** UPGRADING CEPH FROM QUINCY TO REEF ***")
	s.gatherLogs(s.settings.OperatorNamespace, "_before_reef_upgrade")
	s.upgradeCephVersion(installer.ReefVersion.Image, numOSDs)
	// Verify reading and writing to the test clients
	newFile = "post-reef-upgrade-file"
	s.verifyFilesAfterUpgrade(newFile, rbdFilesToRead, cephfsFilesToRead)
	logger.Infof("Verified upgrade from quincy to reef")
	checkCephObjectUser(&s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, true, false)
}
  145. func (s *UpgradeSuite) TestUpgradeCephToQuincyDevel() {
  146. s.baseSetup(false, installer.QuincyVersion)
  147. objectUserID := "upgraded-user"
  148. preFilename := "pre-upgrade-file"
  149. s.settings.CephVersion = installer.QuincyVersion
  150. numOSDs, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectUserID, preFilename)
  151. clusterInfo := client.AdminTestClusterInfo(s.namespace)
  152. requireBlockImagesRemoved := false
  153. defer func() {
  154. blockTestDataCleanUp(s.helper, s.k8sh, &s.Suite, clusterInfo, installer.BlockPoolName, installer.BlockPoolSCName, blockName, rbdPodName, requireBlockImagesRemoved)
  155. cleanupFilesystemConsumer(s.helper, s.k8sh, &s.Suite, s.namespace, filePodName)
  156. cleanupFilesystem(s.helper, s.k8sh, &s.Suite, s.namespace, installer.FilesystemName)
  157. _ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID)
  158. _ = s.helper.BucketClient.DeleteObc(obcName, installer.ObjectStoreSCName, bucketPrefix, maxObject, false)
  159. _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreName, "Delete")
  160. objectStoreCleanUp(&s.Suite, s.helper, s.k8sh, s.settings.Namespace, installer.ObjectStoreName)
  161. }()
  162. //
  163. // Upgrade from quincy to quincy devel
  164. //
  165. logger.Infof("*** UPGRADING CEPH FROM QUINCY STABLE TO QUINCY DEVEL ***")
  166. s.gatherLogs(s.settings.OperatorNamespace, "_before_quincy_upgrade")
  167. s.upgradeCephVersion(installer.QuincyDevelVersion.Image, numOSDs)
  168. // Verify reading and writing to the test clients
  169. newFile := "post-quincy-upgrade-file"
  170. s.verifyFilesAfterUpgrade(newFile, rbdFilesToRead, cephfsFilesToRead)
  171. logger.Infof("Verified upgrade from quincy stable to quincy devel")
  172. checkCephObjectUser(&s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, true, false)
  173. }
// TestUpgradeCephToReefDevel deploys a cluster on the stable reef release and
// upgrades only Ceph (not Rook) to the reef devel image, verifying that
// block/file I/O and the object user still work afterward.
func (s *UpgradeSuite) TestUpgradeCephToReefDevel() {
	s.baseSetup(false, installer.ReefVersion)
	objectUserID := "upgraded-user"
	preFilename := "pre-upgrade-file"
	// NOTE(review): baseSetup already stores ReefVersion in the settings, so
	// this assignment looks redundant — confirm before removing.
	s.settings.CephVersion = installer.ReefVersion
	numOSDs, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectUserID, preFilename)
	clusterInfo := client.AdminTestClusterInfo(s.namespace)
	requireBlockImagesRemoved := false
	// Clean up every block/file/object artifact created before the upgrade.
	defer func() {
		blockTestDataCleanUp(s.helper, s.k8sh, &s.Suite, clusterInfo, installer.BlockPoolName, installer.BlockPoolSCName, blockName, rbdPodName, requireBlockImagesRemoved)
		cleanupFilesystemConsumer(s.helper, s.k8sh, &s.Suite, s.namespace, filePodName)
		cleanupFilesystem(s.helper, s.k8sh, &s.Suite, s.namespace, installer.FilesystemName)
		_ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID)
		_ = s.helper.BucketClient.DeleteObc(obcName, installer.ObjectStoreSCName, bucketPrefix, maxObject, false)
		_ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreSCName, "Delete")
		objectStoreCleanUp(&s.Suite, s.helper, s.k8sh, s.settings.Namespace, installer.ObjectStoreName)
	}()
	//
	// Upgrade from reef to reef devel
	//
	logger.Infof("*** UPGRADING CEPH FROM REEF STABLE TO REEF DEVEL ***")
	s.gatherLogs(s.settings.OperatorNamespace, "_before_reef_upgrade")
	s.upgradeCephVersion(installer.ReefDevelVersion.Image, numOSDs)
	// Verify reading and writing to the test clients
	newFile := "post-reef-upgrade-file"
	s.verifyFilesAfterUpgrade(newFile, rbdFilesToRead, cephfsFilesToRead)
	logger.Infof("verified upgrade from reef stable to reef devel")
	checkCephObjectUser(&s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, true, false)
}
  203. func (s *UpgradeSuite) deployClusterforUpgrade(objectUserID, preFilename string) (int, []string, []string) {
  204. //
  205. // Create block, object, and file storage before the upgrade
  206. // The helm chart already created these though.
  207. //
  208. clusterInfo := client.AdminTestClusterInfo(s.namespace)
  209. if !s.settings.UseHelm {
  210. logger.Infof("Initializing block before the upgrade")
  211. setupBlockLite(s.helper, s.k8sh, &s.Suite, clusterInfo, installer.BlockPoolName, installer.BlockPoolSCName, blockName)
  212. } else {
  213. createAndWaitForPVC(s.helper, s.k8sh, &s.Suite, clusterInfo, installer.BlockPoolSCName, blockName)
  214. }
  215. createPodWithBlock(s.helper, s.k8sh, &s.Suite, s.namespace, installer.BlockPoolSCName, rbdPodName, blockName)
  216. if !s.settings.UseHelm {
  217. // Create the filesystem
  218. logger.Infof("Initializing file before the upgrade")
  219. activeCount := 1
  220. createFilesystem(s.helper, s.k8sh, &s.Suite, s.settings, installer.FilesystemName, activeCount)
  221. assert.NoError(s.T(), s.helper.FSClient.CreateStorageClass(installer.FilesystemName, s.settings.OperatorNamespace, s.namespace, installer.FilesystemSCName))
  222. }
  223. // Start the file test client
  224. createFilesystemConsumerPod(s.helper, s.k8sh, &s.Suite, s.settings, installer.FilesystemName, installer.FilesystemSCName)
  225. if !s.settings.UseHelm {
  226. logger.Infof("Initializing object before the upgrade")
  227. deleteStore := false
  228. tls := false
  229. runObjectE2ETestLite(s.T(), s.helper, s.k8sh, s.installer, s.settings.Namespace, installer.ObjectStoreName, 1, deleteStore, tls)
  230. }
  231. logger.Infof("Initializing object user before the upgrade")
  232. createCephObjectUser(&s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, false, false)
  233. logger.Info("Initializing object bucket claim before the upgrade")
  234. cobErr := s.helper.BucketClient.CreateBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreName, "Delete")
  235. require.Nil(s.T(), cobErr)
  236. cobcErr := s.helper.BucketClient.CreateObc(obcName, installer.ObjectStoreName, bucketPrefix, maxObject, false)
  237. require.Nil(s.T(), cobcErr)
  238. created := utils.Retry(12, 2*time.Second, "OBC is created", func() bool {
  239. // do not check if bound here b/c this fails in Rook v1.4
  240. return s.helper.BucketClient.CheckOBC(obcName, "created")
  241. })
  242. require.True(s.T(), created)
  243. // verify that we're actually running the right pre-upgrade image
  244. s.verifyOperatorImage(installer.Version1_12)
  245. assert.NoError(s.T(), s.k8sh.WriteToPod("", rbdPodName, preFilename, simpleTestMessage))
  246. assert.NoError(s.T(), s.k8sh.ReadFromPod("", rbdPodName, preFilename, simpleTestMessage))
  247. // we will keep appending to this to continue verifying old files through the upgrades
  248. rbdFilesToRead := []string{preFilename}
  249. cephfsFilesToRead := []string{}
  250. // Get some info about the currently deployed OSDs to determine later if they are all updated
  251. osdDepList, err := k8sutil.GetDeployments(context.TODO(), s.k8sh.Clientset, s.namespace, "app=rook-ceph-osd")
  252. require.NoError(s.T(), err)
  253. osdDeps := osdDepList.Items
  254. numOSDs := len(osdDeps) // there should be this many upgraded OSDs
  255. require.NotEqual(s.T(), 0, numOSDs)
  256. return numOSDs, rbdFilesToRead, cephfsFilesToRead
  257. }
  258. func (s *UpgradeSuite) gatherLogs(systemNamespace, testSuffix string) {
  259. // Gather logs before Ceph upgrade to help with debugging
  260. if installer.TestLogCollectionLevel() == "all" {
  261. s.k8sh.PrintPodDescribe(s.namespace)
  262. }
  263. n := strings.Replace(s.T().Name(), "/", "_", -1) + testSuffix
  264. s.installer.GatherAllRookLogs(n, systemNamespace, s.namespace)
  265. }
// upgradeCephVersion patches the CephCluster CR to the new Ceph image and
// waits for all daemons to roll over to a new ceph-version label.
func (s *UpgradeSuite) upgradeCephVersion(newCephImage string, numOSDs int) {
	osdDepList, err := k8sutil.GetDeployments(context.TODO(), s.k8sh.Clientset, s.namespace, "app=rook-ceph-osd")
	require.NoError(s.T(), err)
	oldCephVersion := osdDepList.Items[0].Labels["ceph-version"] // upgraded OSDs should not have this version label
	// Merge-patch only the image field; the operator reconciles the rest.
	_, err = s.k8sh.Kubectl("-n", s.namespace, "patch", "CephCluster", s.namespace, "--type=merge",
		"-p", fmt.Sprintf(`{"spec": {"cephVersion": {"image": "%s"}}}`, newCephImage))
	assert.NoError(s.T(), err)
	// waitForMDS=false: this is a Ceph-only upgrade with no operator restart (see waitForUpgradedDaemons)
	s.waitForUpgradedDaemons(oldCephVersion, "ceph-version", numOSDs, false)
}
  275. func (s *UpgradeSuite) verifyOperatorImage(expectedImage string) {
  276. systemNamespace := installer.SystemNamespace(s.namespace)
  277. // verify that the operator spec is updated
  278. version, err := k8sutil.GetDeploymentImage(context.TODO(), s.k8sh.Clientset, systemNamespace, operatorContainer, operatorContainer)
  279. assert.NoError(s.T(), err)
  280. assert.Equal(s.T(), "rook/ceph:"+expectedImage, version)
  281. }
// verifyRookUpgrade checks that the expected numbers of mon, mgr, and OSD
// deployments exist and then waits for every daemon to roll over to the new
// rook-version label.
func (s *UpgradeSuite) verifyRookUpgrade(numOSDs int) {
	// Get some info about the currently deployed mons to determine later if they are all updated
	monDepList, err := k8sutil.GetDeployments(context.TODO(), s.k8sh.Clientset, s.namespace, "app=rook-ceph-mon")
	require.NoError(s.T(), err)
	require.Equal(s.T(), s.settings.Mons, len(monDepList.Items), monDepList.Items)
	// Get some info about the currently deployed mgr to determine later if it is updated
	mgrDepList, err := k8sutil.GetDeployments(context.TODO(), s.k8sh.Clientset, s.namespace, "app=rook-ceph-mgr")
	require.NoError(s.T(), err)
	require.Equal(s.T(), 1, len(mgrDepList.Items))
	// Get some info about the currently deployed OSDs to determine later if they are all updated
	osdDepList, err := k8sutil.GetDeployments(context.TODO(), s.k8sh.Clientset, s.namespace, "app=rook-ceph-osd")
	require.NoError(s.T(), err)
	require.NotZero(s.T(), len(osdDepList.Items))
	require.Equal(s.T(), numOSDs, len(osdDepList.Items), osdDepList.Items)
	d := osdDepList.Items[0]
	oldRookVersion := d.Labels["rook-version"] // upgraded OSDs should not have this version label
	// waitForMDS=true: a Rook upgrade restarts the operator, so MDS daemons are expected to update too
	s.waitForUpgradedDaemons(oldRookVersion, "rook-version", numOSDs, true)
}
  300. func (s *UpgradeSuite) waitForUpgradedDaemons(previousVersion, versionLabel string, numOSDs int, waitForMDS bool) {
  301. // wait for the mon(s) to be updated
  302. monsNotOldVersion := fmt.Sprintf("app=rook-ceph-mon,%s!=%s", versionLabel, previousVersion)
  303. err := s.k8sh.WaitForDeploymentCount(monsNotOldVersion, s.namespace, s.settings.Mons)
  304. require.NoError(s.T(), err, "mon(s) didn't update")
  305. err = s.k8sh.WaitForLabeledDeploymentsToBeReady(monsNotOldVersion, s.namespace)
  306. require.NoError(s.T(), err)
  307. // wait for the mgr to be updated
  308. mgrNotOldVersion := fmt.Sprintf("app=rook-ceph-mgr,%s!=%s", versionLabel, previousVersion)
  309. err = s.k8sh.WaitForDeploymentCount(mgrNotOldVersion, s.namespace, 1)
  310. require.NoError(s.T(), err, "mgr didn't update")
  311. err = s.k8sh.WaitForLabeledDeploymentsToBeReady(mgrNotOldVersion, s.namespace)
  312. require.NoError(s.T(), err)
  313. // wait for the osd pods to be updated
  314. osdsNotOldVersion := fmt.Sprintf("app=rook-ceph-osd,%s!=%s", versionLabel, previousVersion)
  315. err = s.k8sh.WaitForDeploymentCount(osdsNotOldVersion, s.namespace, numOSDs)
  316. require.NoError(s.T(), err, "osd(s) didn't update")
  317. err = s.k8sh.WaitForLabeledDeploymentsToBeReady(osdsNotOldVersion, s.namespace)
  318. require.NoError(s.T(), err)
  319. // wait for the mds pods to be updated
  320. // FIX: In v1.2 there was a race condition that can cause the MDS to not be updated, so we skip
  321. // the check for MDS upgrade in case it's just a ceph upgrade (no operator restart)
  322. if waitForMDS {
  323. mdsesNotOldVersion := fmt.Sprintf("app=rook-ceph-mds,%s!=%s", versionLabel, previousVersion)
  324. err = s.k8sh.WaitForDeploymentCount(mdsesNotOldVersion, s.namespace, 2 /* always expect 2 mdses */)
  325. require.NoError(s.T(), err)
  326. err = s.k8sh.WaitForLabeledDeploymentsToBeReady(mdsesNotOldVersion, s.namespace)
  327. require.NoError(s.T(), err)
  328. }
  329. rgwsNotOldVersion := fmt.Sprintf("app=rook-ceph-rgw,%s!=%s", versionLabel, previousVersion)
  330. err = s.k8sh.WaitForDeploymentCount(rgwsNotOldVersion, s.namespace, 1 /* always expect 1 rgw */)
  331. require.NoError(s.T(), err)
  332. err = s.k8sh.WaitForLabeledDeploymentsToBeReady(rgwsNotOldVersion, s.namespace)
  333. require.NoError(s.T(), err)
  334. // Give a few seconds for the daemons to settle down after the upgrade
  335. time.Sleep(5 * time.Second)
  336. }
  337. func (s *UpgradeSuite) verifyFilesAfterUpgrade(newFileToWrite string, rbdFilesToRead, cephFSFilesToRead []string) {
  338. retryCount := 5
  339. for _, file := range rbdFilesToRead {
  340. // test reading preexisting files in the pod with rbd mounted
  341. // There is some unreliability right after the upgrade when there is only one osd, so we will retry if needed
  342. assert.NoError(s.T(), s.k8sh.ReadFromPodRetry("", rbdPodName, file, simpleTestMessage, retryCount))
  343. }
  344. // test writing and reading a new file in the pod with rbd mounted
  345. assert.NoError(s.T(), s.k8sh.WriteToPodRetry("", rbdPodName, newFileToWrite, simpleTestMessage, retryCount))
  346. assert.NoError(s.T(), s.k8sh.ReadFromPodRetry("", rbdPodName, newFileToWrite, simpleTestMessage, retryCount))
  347. // wait for filesystem to be active
  348. clusterInfo := client.AdminTestClusterInfo(s.namespace)
  349. err := waitForFilesystemActive(s.k8sh, clusterInfo, installer.FilesystemName)
  350. require.NoError(s.T(), err)
  351. // test reading preexisting files in the pod with cephfs mounted
  352. for _, file := range cephFSFilesToRead {
  353. assert.NoError(s.T(), s.k8sh.ReadFromPodRetry(s.namespace, filePodName, file, simpleTestMessage, retryCount))
  354. }
  355. // test writing and reading a new file in the pod with cephfs mounted
  356. assert.NoError(s.T(), s.k8sh.WriteToPodRetry(s.namespace, filePodName, newFileToWrite, simpleTestMessage, retryCount))
  357. assert.NoError(s.T(), s.k8sh.ReadFromPodRetry(s.namespace, filePodName, newFileToWrite, simpleTestMessage, retryCount))
  358. }
// upgradeToMaster performs the steps necessary to upgrade the Rook v1.12
// cluster to the current master build. It does not verify the upgrade but
// merely starts the upgrade process; callers verify the result.
func (s *UpgradeSuite) upgradeToMaster() {
	// Apply the CRDs for the latest master
	s.settings.RookVersion = installer.LocalBuildTag
	s.installer.Manifests = installer.NewCephManifests(s.settings)
	if s.settings.UseHelm {
		logger.Info("Requiring msgr2 during helm upgrade to test the port conversion from 6789 to 3300")
		s.settings.RequireMsgr2 = true
		// Upgrade the operator chart
		err := s.installer.UpgradeRookOperatorViaHelm()
		require.NoError(s.T(), err, "failed to upgrade the operator chart")
		err = s.installer.UpgradeRookCephClusterViaHelm()
		require.NoError(s.T(), err, "failed to upgrade the cluster chart")
		return
	}
	// Manifest-based install: apply CRDs and common resources, bump the
	// operator image tag, and re-apply the toolbox.
	require.NoError(s.T(), s.k8sh.ResourceOperation("apply", s.installer.Manifests.GetCRDs(s.k8sh)))
	require.NoError(s.T(), s.k8sh.ResourceOperation("apply", s.installer.Manifests.GetCommon()))
	require.NoError(s.T(),
		s.k8sh.SetDeploymentVersion(s.settings.OperatorNamespace, operatorContainer, operatorContainer, installer.LocalBuildTag))
	require.NoError(s.T(), s.k8sh.ResourceOperation("apply", s.installer.Manifests.GetToolbox()))
}