filesystem_test.go
/*
Copyright 2016 The Rook Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client

import (
	"encoding/json"
	"fmt"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/rook/rook/pkg/clusterd"
	exectest "github.com/rook/rook/pkg/util/exec/test"
	"github.com/stretchr/testify/assert"
)
const (
	// this JSON was generated from the mon_command "fs ls", ExecuteMonCommand(conn, map[string]interface{}{"prefix": "fs ls"})
	cephFilesystemListResponseRaw = `[{"name":"myfs1","metadata_pool":"myfs1-metadata","metadata_pool_id":2,"data_pool_ids":[1],"data_pools":["myfs1-data"]}]`

	// this JSON was generated from the mon_command "fs get", ExecuteMonCommand(conn, map[string]interface{}{"prefix": "fs get", "fs_name": fsName})
	cephFilesystemGetResponseRaw = `{"mdsmap":{"epoch":6,"flags":1,"ever_allowed_features":0,"explicitly_allowed_features":0,"created":"2016-11-30 08:35:06.416438","modified":"2016-11-30 08:35:06.416438","tableserver":0,"root":0,"session_timeout":60,"session_autoclose":300,"max_file_size":1099511627776,"last_failure":0,"last_failure_osd_epoch":0,"compat":{"compat":{},"ro_compat":{},"incompat":{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs","feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap","feature_8":"file layout v2"}},"max_mds":1,"in":[0],"up":{"mds_0":4107},"failed":[],"damaged":[],"stopped":[],"info":{"gid_4107":{"gid":4107,"name":"1","rank":0,"incarnation":4,"state":"up:active","state_seq":3,"addr":"127.0.0.1:6804\/2981621686","standby_for_rank":-1,"standby_for_fscid":-1,"standby_for_name":"","standby_replay":false,"export_targets":[],"features":1152921504336314367}},"data_pools":[1],"metadata_pool":2,"enabled":true,"fs_name":"myfs1","balancer":""},"id":1}`
)
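
// TestFilesystemListMarshal verifies that the raw "fs ls" JSON unmarshals into the expected CephFilesystem list.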
func TestFilesystemListMarshal(t *testing.T) {
	var filesystems []CephFilesystem
	err := json.Unmarshal([]byte(cephFilesystemListResponseRaw), &filesystems)
	assert.Nil(t, err)

	// create the expected file systems listing object
	expectedFilesystems := []CephFilesystem{
		{
			Name:           "myfs1",
			MetadataPool:   "myfs1-metadata",
			MetadataPoolID: 2,
			DataPools:      []string{"myfs1-data"},
			DataPoolIDs:    []int{1},
		},
	}
	assert.Equal(t, expectedFilesystems, filesystems)
}
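
// TestFilesystemGetMarshal verifies that the raw "fs get" JSON unmarshals into the expected CephFilesystemDetails.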
func TestFilesystemGetMarshal(t *testing.T) {
	var fs CephFilesystemDetails
	err := json.Unmarshal([]byte(cephFilesystemGetResponseRaw), &fs)
	assert.Nil(t, err)

	// create the expected file system details object
	expectedFS := CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			Enabled:        true,
			Root:           0,
			TableServer:    0,
			MaxMDS:         1,
			MetadataPool:   2,
			DataPools:      []int{1},
			In:             []int{0},
			Up:             map[string]int{"mds_0": 4107},
			Failed:         []int{},
			Damaged:        []int{},
			Stopped:        []int{},
			Info: map[string]MDSInfo{
				"gid_4107": {
					GID:     4107,
					Name:    "1",
					Rank:    0,
					State:   "up:active",
					Address: "127.0.0.1:6804/2981621686",
				},
			},
		},
	}
	assert.Equal(t, expectedFS, fs)
}
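
// TestFilesystemRemove verifies that removing a filesystem also deletes its data pool,
// metadata pool, and crush rule via the mocked ceph commands.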
func TestFilesystemRemove(t *testing.T) {
	dataDeleted := false
	metadataDeleted := false
	crushDeleted := false
	executor := &exectest.MockExecutor{}
	context := &clusterd.Context{Executor: executor}
	fs := CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MetadataPool:   2,
			DataPools:      []int{1},
		},
	}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				output, err := json.Marshal(fs)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "osd" {
			if args[1] == "lspools" {
				pools := []*CephStoragePoolSummary{
					{Name: "mydata", Number: 1},
					{Name: "mymetadata", Number: 2},
				}
				output, err := json.Marshal(pools)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "pool" {
				if args[2] == "get" {
					return `{"pool_id":1}`, nil
				}
				if args[2] == "delete" {
					if args[3] == "mydata" {
						dataDeleted = true
						return "", nil
					}
					if args[3] == "mymetadata" {
						metadataDeleted = true
						return "", nil
					}
				}
			}
			if args[1] == "crush" {
				assert.Equal(t, "rule", args[2])
				assert.Equal(t, "rm", args[3])
				crushDeleted = true
				return "", nil
			}
		}
		emptyPool := "{\"images\":{\"count\":0,\"provisioned_bytes\":0,\"snap_count\":0},\"trash\":{\"count\":1,\"provisioned_bytes\":2048,\"snap_count\":0}}"
		if args[0] == "pool" {
			if args[1] == "stats" {
				return emptyPool, nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	err := RemoveFilesystem(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, false)
	assert.Nil(t, err)
	assert.True(t, metadataDeleted)
	assert.True(t, dataDeleted)
	assert.True(t, crushDeleted)
}
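
// TestFailAllStandbyReplayMDS verifies that only MDS daemons in up:standby-replay state
// are failed, and that errors from "mds fail" are propagated to the caller.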
func TestFailAllStandbyReplayMDS(t *testing.T) {
	executor := &exectest.MockExecutor{}
	context := &clusterd.Context{Executor: executor}
	failedGids := make([]string, 0)

	// one active and one standby-replay MDS: expect the standby-replay gid to be failed
	fs := CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MetadataPool:   2,
			Up: map[string]int{
				"mds_0": 123,
			},
			DataPools: []int{3},
			Info: map[string]MDSInfo{
				"gid_123": {
					GID:   123,
					State: "up:active",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "a"),
				},
				"gid_124": {
					GID:   124,
					State: "up:standby-replay",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "b"),
				},
			},
		},
	}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				output, err := json.Marshal(fs)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "mds" {
			if args[1] == "fail" {
				failedGids = append(failedGids, args[2])
				return "", nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	err := FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
	assert.NoError(t, err)
	assert.ElementsMatch(t, failedGids, []string{"124"})

	// no standby-replay MDS: "mds fail" must not be executed
	fs = CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MetadataPool:   2,
			Up: map[string]int{
				"mds_0": 123,
			},
			DataPools: []int{3},
			Info: map[string]MDSInfo{
				"gid_123": {
					GID:   123,
					State: "up:active",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "a"),
				},
				"gid_124": {
					GID:   124,
					State: "up:standby",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "b"),
				},
			},
		},
	}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				output, err := json.Marshal(fs)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "mds" {
			if args[1] == "fail" {
				return "", errors.Errorf("unexpected execution of mds fail")
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	err = FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
	assert.NoError(t, err)

	// "mds fail" returns an error: expect it to be propagated
	fs = CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MetadataPool:   2,
			Up: map[string]int{
				"mds_0": 123,
			},
			DataPools: []int{3},
			Info: map[string]MDSInfo{
				"gid_123": {
					GID:   123,
					State: "up:active",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "a"),
				},
				"gid_124": {
					GID:   124,
					State: "up:standby-replay",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "b"),
				},
			},
		},
	}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				output, err := json.Marshal(fs)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "mds" {
			if args[1] == "fail" {
				return "", errors.Errorf("expected execution of mds fail")
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	err = FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "expected execution of mds fail")
}
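
// TestGetMdsIdByRank verifies the rank-to-daemon-name lookup and its error paths:
// "fs get" failure, missing rank in the Up map, and missing gid entry in the Info map.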
func TestGetMdsIdByRank(t *testing.T) {
	executor := &exectest.MockExecutor{}
	context := &clusterd.Context{Executor: executor}
	fs := CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MetadataPool:   2,
			Up: map[string]int{
				"mds_0": 123,
			},
			DataPools: []int{3},
			Info: map[string]MDSInfo{
				"gid_123": {
					GID:   123,
					State: "up:active",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "a"),
				},
				"gid_124": {
					GID:   124,
					State: "up:standby-replay",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "b"),
				},
			},
		},
	}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				output, err := json.Marshal(fs)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "mds" {
			if args[1] == "fail" {
				return "", nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	name, err := GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
	assert.Equal(t, name, "myfs1-a")
	assert.NoError(t, err)

	// test error: "fs get" fails
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				return "", errors.Errorf("test ceph fs get error")
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "mds" {
			if args[1] == "fail" {
				return "", nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
	assert.Equal(t, "", name)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "test ceph fs get error")

	// test error: the Up map has no entry for rank 0
	fs = CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MetadataPool:   2,
			Up: map[string]int{
				"mds_1": 123,
			},
			DataPools: []int{3},
			Info: map[string]MDSInfo{
				"gid_123": {
					GID:   123,
					State: "up:active",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "a"),
				},
				"gid_124": {
					GID:   124,
					State: "up:standby-replay",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "b"),
				},
			},
		},
	}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				output, err := json.Marshal(fs)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "mds" {
			if args[1] == "fail" {
				return "", nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
	assert.Equal(t, "", name)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "failed to get mds gid from rank 0")

	// test error: the Info map has no entry for the gid of rank 0
	fs = CephFilesystemDetails{
		ID: 1,
		MDSMap: MDSMap{
			FilesystemName: "myfs1",
			MetadataPool:   2,
			Up: map[string]int{
				"mds_0": 123,
			},
			DataPools: []int{3},
			Info: map[string]MDSInfo{
				"gid_122": {
					GID:   123,
					State: "up:active",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "a"),
				},
				"gid_124": {
					GID:   124,
					State: "up:standby-replay",
					Name:  fmt.Sprintf("%s-%s", "myfs1", "b"),
				},
			},
		},
	}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "get" {
				output, err := json.Marshal(fs)
				assert.Nil(t, err)
				return string(output), nil
			}
			if args[1] == "rm" {
				return "", nil
			}
		}
		if args[0] == "mds" {
			if args[1] == "fail" {
				return "", nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
	assert.Equal(t, "", name)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "failed to get mds info for rank 0")
}
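
// TestGetMDSDump verifies that the "fs dump" output unmarshals into the expected
// standby list, and that a dump failure is returned as an error.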
func TestGetMDSDump(t *testing.T) {
	executor := &exectest.MockExecutor{}
	context := &clusterd.Context{Executor: executor}
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "dump" {
				output := `{"epoch":12,"default_fscid":1,"compat":{"compat":{},"ro_compat":{},"incompat":
{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs",
"feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap",
"feature_8":"no anchor table","feature_9":"file layout v2","feature_10":"snaprealm v2"}},"feature_flags":
{"enable_multiple":false,"ever_enabled_multiple":false},"standbys":[{"gid":26829,"name":"rook-ceph-filesystem-b","rank":-1,"incarnation":0,"state":"up:standby",
"state_seq":1,"addr":"10.110.29.245:6805/3170687682","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.245:6804","nonce":3170687682},{"type":"v1","addr":"10.110.29.245:6805","nonce":3170687682}]},"export_targets":[],"features":4611087854035861503,"flags":0,"epoch":12}],"filesystems":[{"mdsmap":{"epoch":11,"flags":18,"ever_allowed_features":32,"explicitly_allowed_features":32,"created":"2021-04-23 01:52:33.467863",
"modified":"2021-04-23 08:31:03.019621","tableserver":0,"root":0,"session_timeout":60,"session_autoclose":300,"min_compat_client":"-1 (unspecified)","max_file_size":1099511627776,"last_failure":0,"last_failure_osd_epoch":0,"compat":{"compat":{},"ro_compat":{},"incompat":{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs","feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap","feature_8":"no anchor table","feature_9":"file layout v2",
"feature_10":"snaprealm v2"}},"max_mds":1,"in":[0],"up":{"mds_0":14707},"failed":[],"damaged":[],"stopped":[],"info":{"gid_14707":{"gid":14707,"name":"rook-ceph-filesystem-a","rank":0,"incarnation":5,"state":"up:active","state_seq":2,"addr":"10.110.29.236:6807/1996297745","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.236:6806","nonce":1996297745},
{"type":"v1","addr":"10.110.29.236:6807","nonce":1996297745}]},"export_targets":[],"features":4611087854035861503,"flags":0}},"data_pools":[3],"metadata_pool":2,"enabled":true,"fs_name":"rook-ceph-filesystem","balancer":"","standby_count_wanted":1},"id":1}]}`
				return output, nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	mdsDump, err := GetMDSDump(context, AdminTestClusterInfo("mycluster"))
	assert.NoError(t, err)
	assert.ElementsMatch(t, mdsDump.Standbys, []MDSStandBy{{Name: "rook-ceph-filesystem-b", Rank: -1}})

	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "dump" {
				return "", errors.Errorf("dump fs failed")
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	_, err = GetMDSDump(context, AdminTestClusterInfo("mycluster"))
	assert.Error(t, err)
}
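
// TestWaitForNoStandbys verifies that the wait times out while standbys remain,
// that "fs dump" failures surface as errors, and that it returns once the standby list is empty.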
func TestWaitForNoStandbys(t *testing.T) {
	executor := &exectest.MockExecutor{}
	context := &clusterd.Context{Executor: executor}

	// "fs dump" output showing one standby MDS, shared by the mocks below
	standbyDump := `{"epoch":12,"default_fscid":1,"compat":{"compat":{},"ro_compat":{},"incompat":
{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs",
"feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap",
"feature_8":"no anchor table","feature_9":"file layout v2","feature_10":"snaprealm v2"}},"feature_flags":
{"enable_multiple":false,"ever_enabled_multiple":false},"standbys":[{"gid":26829,"name":"rook-ceph-filesystem-b","rank":-1,"incarnation":0,"state":"up:standby",
"state_seq":1,"addr":"10.110.29.245:6805/3170687682","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.245:6804","nonce":3170687682},{"type":"v1","addr":"10.110.29.245:6805","nonce":3170687682}]},"export_targets":[],"features":4611087854035861503,"flags":0,"epoch":12}],"filesystems":[{"mdsmap":{"epoch":11,"flags":18,"ever_allowed_features":32,"explicitly_allowed_features":32,"created":"2021-04-23 01:52:33.467863",
"modified":"2021-04-23 08:31:03.019621","tableserver":0,"root":0,"session_timeout":60,"session_autoclose":300,"min_compat_client":"-1 (unspecified)","max_file_size":1099511627776,"last_failure":0,"last_failure_osd_epoch":0,"compat":{"compat":{},"ro_compat":{},"incompat":{"feature_1":"base v0.20","feature_2":"client writeable ranges","feature_3":"default file layouts on dirs","feature_4":"dir inode in separate object","feature_5":"mds uses versioned encoding","feature_6":"dirfrag is stored in omap","feature_8":"no anchor table","feature_9":"file layout v2",
"feature_10":"snaprealm v2"}},"max_mds":1,"in":[0],"up":{"mds_0":14707},"failed":[],"damaged":[],"stopped":[],"info":{"gid_14707":{"gid":14707,"name":"rook-ceph-filesystem-a","rank":0,"incarnation":5,"state":"up:active","state_seq":2,"addr":"10.110.29.236:6807/1996297745","addrs":{"addrvec":[{"type":"v2","addr":"10.110.29.236:6806","nonce":1996297745},
{"type":"v1","addr":"10.110.29.236:6807","nonce":1996297745}]},"export_targets":[],"features":4611087854035861503,"flags":0}},"data_pools":[3],"metadata_pool":2,"enabled":true,"fs_name":"rook-ceph-filesystem","balancer":"","standby_count_wanted":1},"id":1}]}`

	// the dump always shows a standby, so the wait times out
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "dump" {
				return standbyDump, nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	err := WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second)
	assert.Error(t, err)

	// "fs dump" itself fails
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "dump" {
				return "", errors.Errorf("failed to dump fs info")
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	err = WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second)
	assert.Error(t, err)

	// the first dump shows a standby, the second shows none: the wait succeeds
	firstCall := true
	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
		logger.Infof("Command: %s %v", command, args)
		if args[0] == "fs" {
			if args[1] == "dump" {
				if firstCall {
					firstCall = false
					return standbyDump, nil
				}
				return `{"standbys":[],"filesystems":[]}`, nil
			}
		}
		return "", errors.Errorf("unexpected ceph command %q", args)
	}
	err = WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second)
	assert.NoError(t, err)
}
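
// TestListSubvolumeGroups exercises ListSubvolumeGroups against empty, single,
// multiple, and error responses from "fs subvolumegroup ls".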
func TestListSubvolumeGroups(t *testing.T) {
	fsName := "myfs"

	newContext := func(retString string, retErr error) *clusterd.Context {
		t.Helper()

		executor := &exectest.MockExecutor{
			MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) {
				t.Logf("Command: %s %v", command, args)
				if args[0] == "fs" && args[1] == "subvolumegroup" && args[2] == "ls" && args[3] == fsName {
					return retString, retErr
				}
				panic(fmt.Sprintf("unhandled command %q %v", command, args))
			},
		}
		return &clusterd.Context{Executor: executor}
	}

	t.Run("no groups", func(t *testing.T) {
		ctx := newContext("[]", nil)
		ret, err := ListSubvolumeGroups(ctx, AdminTestClusterInfo("mycluster"), fsName)
		assert.NoError(t, err)
		assert.Empty(t, ret)
	})

	t.Run("one group", func(t *testing.T) {
		ctx := newContext(`[
    {
        "name": "csi"
    }
]
`, nil)
		ret, err := ListSubvolumeGroups(ctx, AdminTestClusterInfo("mycluster"), fsName)
		assert.NoError(t, err)
		assert.ElementsMatch(t, ret, SubvolumeGroupList{
			SubvolumeGroup{Name: "csi"},
		})
	})

	t.Run("multiple groups", func(t *testing.T) {
		ctx := newContext(`[
    {
        "name": "group-a"
    },
    {
        "name": "csi"
    }
]
`, nil)
		ret, err := ListSubvolumeGroups(ctx, AdminTestClusterInfo("mycluster"), fsName)
		assert.NoError(t, err)
		assert.ElementsMatch(t, ret, SubvolumeGroupList{
			SubvolumeGroup{Name: "group-a"},
			SubvolumeGroup{Name: "csi"},
		})
	})

	t.Run("cli return error", func(t *testing.T) {
		ctx := newContext(`[
    {
        "name": "csi"
    }
]`, errors.New("induced error"))
		ret, err := ListSubvolumeGroups(ctx, AdminTestClusterInfo("mycluster"), fsName)
		assert.Error(t, err)
		t.Log("error return", err.Error())
		assert.Contains(t, err.Error(), `induced error`)
		assert.Contains(t, err.Error(), `failed to list subvolumegroups in filesystem "myfs"`)
		assert.Empty(t, ret)
	})
}
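
// TestListSubvolumesInGroup exercises ListSubvolumesInGroup against empty, single,
// multiple, and error responses from "fs subvolume ls".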
func TestListSubvolumesInGroup(t *testing.T) {
	fsName := "myfs"
	groupName := "csi"

	newContext := func(retString string, retErr error) *clusterd.Context {
		t.Helper()

		executor := &exectest.MockExecutor{
			MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) {
				t.Logf("Command: %s %v", command, args)
				if args[0] == "fs" && args[1] == "subvolume" && args[2] == "ls" && args[3] == fsName && args[4] == groupName {
					return retString, retErr
				}
				panic(fmt.Sprintf("unhandled command %q %v", command, args))
			},
		}
		return &clusterd.Context{Executor: executor}
	}

	t.Run("no subvolumes", func(t *testing.T) {
		ctx := newContext("[]", nil)
		ret, err := ListSubvolumesInGroup(ctx, AdminTestClusterInfo("mycluster"), fsName, groupName)
		assert.NoError(t, err)
		assert.Empty(t, ret)
	})

	t.Run("one subvolume", func(t *testing.T) {
		ctx := newContext(`[
    {
        "name": "csi-vol-hash"
    }
]
`, nil)
		ret, err := ListSubvolumesInGroup(ctx, AdminTestClusterInfo("mycluster"), fsName, groupName)
		assert.NoError(t, err)
		assert.ElementsMatch(t, ret, SubvolumeList{
			Subvolume{Name: "csi-vol-hash"},
		})
	})

	t.Run("multiple subvolumes", func(t *testing.T) {
		ctx := newContext(`[
    {
        "name": "csi-vol-hash"
    },
    {
        "name": "csi-nfs-vol-hash"
    }
]
`, nil)
		ret, err := ListSubvolumesInGroup(ctx, AdminTestClusterInfo("mycluster"), fsName, groupName)
		assert.NoError(t, err)
		assert.ElementsMatch(t, ret, SubvolumeList{
			Subvolume{Name: "csi-vol-hash"},
			Subvolume{Name: "csi-nfs-vol-hash"},
		})
	})

	t.Run("cli return error", func(t *testing.T) {
		ctx := newContext(`[
    {
        "name": "csi-vol-hash"
    }
]`, errors.New("induced error"))
		ret, err := ListSubvolumesInGroup(ctx, AdminTestClusterInfo("mycluster"), fsName, groupName)
		assert.Error(t, err)
		t.Log("error return", err.Error())
		assert.Contains(t, err.Error(), `induced error`)
		assert.Contains(t, err.Error(), `failed to list subvolumes in filesystem "myfs" subvolume group "csi"`)
		assert.Empty(t, ret)
	})
}