  1. /*
  2. Copyright 2016 The Rook Authors. All rights reserved.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package client
  14. import (
  15. "context"
  16. "fmt"
  17. "net"
  18. "testing"
  19. "time"
  20. "github.com/pkg/errors"
  21. cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
  22. cephver "github.com/rook/rook/pkg/operator/ceph/version"
  23. "github.com/rook/rook/pkg/operator/k8sutil"
  24. "github.com/stretchr/testify/assert"
  25. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  26. "k8s.io/apimachinery/pkg/runtime"
  27. "k8s.io/apimachinery/pkg/types"
  28. )
// ClusterInfo is a collection of information about a particular Ceph cluster. Rook uses information
// about the cluster to configure daemons to connect to the desired cluster.
type ClusterInfo struct {
	FSID          string              // unique Ceph cluster identifier
	MonitorSecret string              // mon. keyring secret
	CephCred      CephCred            // credentials the operator uses to talk to Ceph
	Monitors      map[string]*MonInfo // mon name -> mon info
	CephVersion   cephver.CephVersion
	Namespace     string
	OwnerInfo     *k8sutil.OwnerInfo
	// Hide the name of the cluster since in 99% of uses we want to use the cluster namespace.
	// If the CR name is needed, access it through the NamespacedName() method.
	name              string
	OsdUpgradeTimeout time.Duration
	NetworkSpec       cephv1.NetworkSpec
	CSIDriverSpec     cephv1.CSIDriverSpec
	// Context used to determine whether a reconcile loop should exit (i.e. whether the context
	// has been cancelled). This cannot be the main clusterd context since that is a pointer
	// passed through the entire life cycle of the operator. If that context were cancelled it
	// would immediately be re-created, thus existing reconcile loops would not be cancelled.
	// Whereas if passed through clusterInfo, we don't have that problem since clusterInfo is
	// re-hydrated when a context is cancelled.
	Context context.Context
}
// MonInfo is a collection of information about a Ceph mon.
type MonInfo struct {
	Name string `json:"name"`
	// Endpoint is the mon's "host:port" address (see NewMonInfo).
	Endpoint string `json:"endpoint"`
	// Whether detected out of quorum by rook. May be different from actual ceph quorum.
	OutOfQuorum bool `json:"outOfQuorum"`
}
// CephCred represents the Ceph cluster username and key used by the operator.
// For converged clusters it will be the admin key, but external clusters will have a
// lower-privileged key.
type CephCred struct {
	// Username is serialized under the "name" key — keep the tag as-is for wire compatibility.
	Username string `json:"name"`
	Secret   string `json:"secret"`
}
  68. func NewClusterInfo(namespace, name string) *ClusterInfo {
  69. return &ClusterInfo{Namespace: namespace, name: name}
  70. }
  71. func (c *ClusterInfo) SetName(name string) {
  72. c.name = name
  73. }
  74. func (c *ClusterInfo) NamespacedName() types.NamespacedName {
  75. if c.name == "" {
  76. panic("name is not set on the clusterInfo")
  77. }
  78. return types.NamespacedName{Namespace: c.Namespace, Name: c.name}
  79. }
  80. // AdminClusterInfo() creates a ClusterInfo with the basic info to access the cluster
  81. // as an admin.
  82. func AdminClusterInfo(ctx context.Context, namespace, name string) *ClusterInfo {
  83. ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(&metav1.OwnerReference{}, "")
  84. return &ClusterInfo{
  85. Namespace: namespace,
  86. CephCred: CephCred{
  87. Username: AdminUsername,
  88. },
  89. name: name,
  90. OwnerInfo: ownerInfo,
  91. Context: ctx,
  92. }
  93. }
  94. // AdminTestClusterInfo() creates a ClusterInfo with the basic info to access the cluster
  95. // as an admin. This cluster info should only be used by unit or integration tests.
  96. func AdminTestClusterInfo(namespace string) *ClusterInfo {
  97. return AdminClusterInfo(context.TODO(), namespace, "testing")
  98. }
  99. // IsInitialized returns true if the critical information in the ClusterInfo struct has been filled
  100. // in. This method exists less out of necessity than the desire to be explicit about the lifecycle
  101. // of the ClusterInfo struct during startup, specifically that it is expected to exist after the
  102. // Rook operator has started up or connected to the first components of the Ceph cluster.
  103. func (c *ClusterInfo) IsInitialized() error {
  104. if c == nil {
  105. return errors.New("clusterInfo is nil")
  106. }
  107. if c.FSID == "" {
  108. return errors.New("cluster fsid is empty")
  109. }
  110. if c.MonitorSecret == "" {
  111. return errors.New("monitor secret is empty")
  112. }
  113. if c.CephCred.Username == "" {
  114. return errors.New("ceph username is empty")
  115. }
  116. if c.CephCred.Secret == "" {
  117. return errors.New("ceph secret is empty")
  118. }
  119. if c.Context == nil {
  120. return errors.New("context is nil")
  121. }
  122. if c.Context.Err() != nil {
  123. return c.Context.Err()
  124. }
  125. return nil
  126. }
  127. // NewMonInfo returns a new Ceph mon info struct from the given inputs.
  128. func NewMonInfo(name, ip string, port int32) *MonInfo {
  129. return &MonInfo{Name: name, Endpoint: net.JoinHostPort(ip, fmt.Sprintf("%d", port))}
  130. }
  131. func NewMinimumOwnerInfo(t *testing.T) *k8sutil.OwnerInfo {
  132. cluster := &cephv1.CephCluster{}
  133. scheme := runtime.NewScheme()
  134. err := cephv1.AddToScheme(scheme)
  135. assert.NoError(t, err)
  136. return k8sutil.NewOwnerInfo(cluster, scheme)
  137. }
  138. func NewMinimumOwnerInfoWithOwnerRef() *k8sutil.OwnerInfo {
  139. return k8sutil.NewOwnerInfoWithOwnerRef(&metav1.OwnerReference{}, "")
  140. }