Add support for configuring CephCluster healthCheck
Enable advanced configuration of CephCluster healthCheck settings via the StorageCluster spec, based on the Rook CephClusterHealthCheckSpec type.

Signed-off-by: Oded Viner <[email protected]>
OdedViner committed Jan 22, 2025
1 parent 57599d6 commit 239a822
Showing 11 changed files with 1,289 additions and 0 deletions.
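
For illustration, a StorageCluster CR exercising the new field might look like the sketch below. It is not taken from the commit: the spec.managedResources.cephCluster.healthCheck path follows the json tags added in this diff, the daemonHealth keys (mon, osd, status) and the probe maps follow the Rook CephClusterHealthCheckSpec type, and all interval values are illustrative.

apiVersion: ocs.openshift.io/v1
kind: StorageCluster
metadata:
  name: ocs-storagecluster
  namespace: openshift-storage
spec:
  managedResources:
    cephCluster:
      healthCheck:
        daemonHealth:
          mon:
            disabled: false
            interval: 45s
          osd:
            disabled: false
            interval: 60s
          status:
            disabled: false
            interval: 60s
        livenessProbe:
          mon:
            disabled: false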
3 changes: 3 additions & 0 deletions api/v1/storagecluster_types.go
@@ -216,6 +216,9 @@ type ManageCephCluster struct {

// Whether to allow updating the device class after the OSD is initially provisioned
AllowDeviceClassUpdate bool `json:"allowDeviceClassUpdate,omitempty"`

// CephClusterHealthCheckSpec represents the health check for Ceph daemons
HealthCheck *rookCephv1.CephClusterHealthCheckSpec `json:"healthCheck,omitempty"`
}

// ManageCephConfig defines how to reconcile the Ceph configuration
5 changes: 5 additions & 0 deletions api/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

396 changes: 396 additions & 0 deletions config/crd/bases/ocs.openshift.io_storageclusters.yaml

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions controllers/storagecluster/cephcluster.go
@@ -503,6 +503,12 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, kmsConfigMap *co
cephCluster.Spec.DisruptionManagement.OSDMaintenanceTimeout = sc.Spec.ManagedResources.CephCluster.OsdMaintenanceTimeout
}

if sc.Spec.ManagedResources.CephCluster.HealthCheck != nil {
cephCluster.Spec.HealthCheck = *sc.Spec.ManagedResources.CephCluster.HealthCheck
} else {
cephCluster.Spec.HealthCheck = rookCephv1.CephClusterHealthCheckSpec{}
}

if sc.Spec.LogCollector != nil {
if sc.Spec.LogCollector.Periodicity != "" {
cephCluster.Spec.LogCollector.Periodicity = sc.Spec.LogCollector.Periodicity
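
The else branch above is what makes the field safely removable: clearing spec.managedResources.cephCluster.healthCheck resets the CephCluster to an empty CephClusterHealthCheckSpec, so Rook falls back to its built-in health check defaults instead of keeping stale values. With the example StorageCluster shown earlier, the rendered CephCluster would carry the settings through verbatim; a sketch follows (object name and namespace are illustrative):

apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: ocs-storagecluster-cephcluster
  namespace: openshift-storage
spec:
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mon:
        disabled: false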
71 changes: 71 additions & 0 deletions controllers/storagecluster/cephcluster_test.go
@@ -1539,6 +1539,77 @@ func TestEnsureUpgradeReliabilityParams(t *testing.T) {
assert.Equal(t, 45*time.Minute, expected.Spec.DisruptionManagement.OSDMaintenanceTimeout)
}

func TestHealthCheckConfiguration(t *testing.T) {
sc := &ocsv1.StorageCluster{}
mockStorageCluster.DeepCopyInto(sc)
interval := metav1.Duration{
Duration: 20 * time.Second,
}
mockProbeSpec := &rookCephv1.ProbeSpec{
Disabled: false,
Probe: &corev1.Probe{
InitialDelaySeconds: 10,
TimeoutSeconds: 5,
},
}
probeMap := make(map[rookCephv1.KeyType]*rookCephv1.ProbeSpec)
probeMap["abc"] = mockProbeSpec

sc.Spec.ManagedResources.CephCluster.HealthCheck = &rookCephv1.CephClusterHealthCheckSpec{
DaemonHealth: rookCephv1.DaemonHealthSpec{
Status: rookCephv1.HealthCheckSpec{
Timeout: "11",
Disabled: false,
Interval: &interval,
},
Monitor: rookCephv1.HealthCheckSpec{
Timeout: "22",
Disabled: true,
Interval: &interval,
},
ObjectStorageDaemon: rookCephv1.HealthCheckSpec{
Timeout: "33",
Disabled: false,
Interval: &interval,
},
},
StartupProbe: probeMap,
LivenessProbe: probeMap,
}
expected := newCephCluster(sc, "", nil, log)

assert.Equal(t, "11", expected.Spec.HealthCheck.DaemonHealth.Status.Timeout)
assert.Equal(t, false, expected.Spec.HealthCheck.DaemonHealth.Status.Disabled)
assert.Equal(t, &interval, expected.Spec.HealthCheck.DaemonHealth.Status.Interval)

assert.Equal(t, "22", expected.Spec.HealthCheck.DaemonHealth.Monitor.Timeout)
assert.Equal(t, true, expected.Spec.HealthCheck.DaemonHealth.Monitor.Disabled)
assert.Equal(t, &interval, expected.Spec.HealthCheck.DaemonHealth.Monitor.Interval)

assert.Equal(t, "33", expected.Spec.HealthCheck.DaemonHealth.ObjectStorageDaemon.Timeout)
assert.Equal(t, false, expected.Spec.HealthCheck.DaemonHealth.ObjectStorageDaemon.Disabled)
assert.Equal(t, &interval, expected.Spec.HealthCheck.DaemonHealth.ObjectStorageDaemon.Interval)

compareProbeMaps(t, probeMap, expected.Spec.HealthCheck.LivenessProbe)
compareProbeMaps(t, probeMap, expected.Spec.HealthCheck.StartupProbe)

}

// compareProbeMaps asserts that two probe maps hold the same keys and equivalent ProbeSpec values
func compareProbeMaps(t *testing.T, map1, map2 map[rookCephv1.KeyType]*rookCephv1.ProbeSpec) {
assert.Equal(t, len(map1), len(map2))

for key, value1 := range map1 {
value2, exists := map2[key]
assert.Assert(t, exists, "Key %v not found in map2", key)

// Compare the actual ProbeSpec values
assert.Equal(t, value1.Disabled, value2.Disabled)
assert.Equal(t, value1.Probe.InitialDelaySeconds, value2.Probe.InitialDelaySeconds)
assert.Equal(t, value1.Probe.TimeoutSeconds, value2.Probe.TimeoutSeconds)
}
}

func TestDetermineDefaultCephDeviceClass(t *testing.T) {
cases := []struct {
label string
396 changes: 396 additions & 0 deletions deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml

Large diffs are not rendered by default.

396 changes: 396 additions & 0 deletions deploy/ocs-operator/manifests/storagecluster.crd.yaml

Large diffs are not rendered by default.

The four remaining generated files are not rendered by default.
