From 02f031759ec25871088bca0dfdd72ad582d462e8 Mon Sep 17 00:00:00 2001
From: Malay Kumar Parida
Date: Thu, 27 Feb 2025 11:25:18 +0530
Subject: [PATCH] Allow poolSpec for metadata pools of cephFS and objectStore

This change enables configuring the poolSpec for the metadata pools of
the default cephFS & cephObjectStore through the StorageCluster CR. The
poolSpec is passed directly to the Ceph CRs; only the necessary fields
are validated and defaulted, and all other fields remain untouched.

Signed-off-by: Malay Kumar Parida
---
 controllers/storagecluster/cephblockpools.go | 10 ++--------
 controllers/storagecluster/cephcluster.go    | 20 +++++++++++++++++++
 controllers/storagecluster/cephfilesystem.go | 12 ++++-------
 .../storagecluster/cephobjectstores.go       | 14 ++++++-------
 4 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/controllers/storagecluster/cephblockpools.go b/controllers/storagecluster/cephblockpools.go
index 55a8140719..234ff0e892 100644
--- a/controllers/storagecluster/cephblockpools.go
+++ b/controllers/storagecluster/cephblockpools.go
@@ -150,10 +150,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile
 
 	_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
 		cephBlockPool.Spec.Name = ".mgr"
-		cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
-		cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
-		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
+		setDefaultMetadataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
 		util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")
 
 		return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -198,10 +195,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
 
 	_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
 		cephBlockPool.Spec.Name = ".nfs"
-		cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
-		cephBlockPool.Spec.EnableCrushUpdates = true
-		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
+		setDefaultMetadataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
 		cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
 		util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")
 
diff --git a/controllers/storagecluster/cephcluster.go b/controllers/storagecluster/cephcluster.go
index 086066baf3..d002e22453 100644
--- a/controllers/storagecluster/cephcluster.go
+++ b/controllers/storagecluster/cephcluster.go
@@ -1483,6 +1483,26 @@ func isEncrptionSettingUpdated(clusterWideEncrytion bool, existingDeviceSet []ro
 	return false
 }
 
+// setDefaultMetadataPoolSpec sets the common pool spec for all metadata pools as necessary
+func setDefaultMetadataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
+	poolSpec.EnableCrushUpdates = true
+	if poolSpec.DeviceClass == "" {
+		poolSpec.DeviceClass = sc.Status.DefaultCephDeviceClass
+	}
+	if poolSpec.FailureDomain == "" {
+		poolSpec.FailureDomain = getFailureDomain(sc)
+	}
+	// Set default replication settings if necessary
+	// Always set the default Size & ReplicasPerFailureDomain in arbiter mode
+	defaultReplicatedSpec := generateCephReplicatedSpec(sc, poolTypeMetadata)
+	if poolSpec.Replicated.Size == 0 || arbiterEnabled(sc) {
+		poolSpec.Replicated.Size = defaultReplicatedSpec.Size
+	}
+	if poolSpec.Replicated.ReplicasPerFailureDomain == 0 || arbiterEnabled(sc) {
+		poolSpec.Replicated.ReplicasPerFailureDomain = defaultReplicatedSpec.ReplicasPerFailureDomain
+	}
+}
+
 // setDefaultDataPoolSpec sets the common pool spec for all data pools as necessary
 func setDefaultDataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
 	poolSpec.EnableCrushUpdates = true
diff --git a/controllers/storagecluster/cephfilesystem.go b/controllers/storagecluster/cephfilesystem.go
index 0d73013a0a..6daf2c963b 100644
--- a/controllers/storagecluster/cephfilesystem.go
+++ b/controllers/storagecluster/cephfilesystem.go
@@ -30,10 +30,8 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		},
 		Spec: cephv1.FilesystemSpec{
 			MetadataPool: cephv1.NamedPoolSpec{
-				PoolSpec: cephv1.PoolSpec{
-					Replicated:    generateCephReplicatedSpec(initStorageCluster, poolTypeMetadata),
-					FailureDomain: initStorageCluster.Status.FailureDomain,
-				}},
+				PoolSpec: initStorageCluster.Spec.ManagedResources.CephFilesystems.MetadataPoolSpec, // Pass the poolSpec from the storageCluster CR
+			},
 			MetadataServer: cephv1.MetadataServerSpec{
 				ActiveCount:   int32(getActiveMetadataServers(initStorageCluster)),
 				ActiveStandby: true,
@@ -55,16 +53,14 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 
 	// Append additional pools from specified additional data pools
 	ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...)
-
 	for i := range ret.Spec.DataPools {
 		poolSpec := &ret.Spec.DataPools[i].PoolSpec
 		// Set default values in the poolSpec as necessary
 		setDefaultDataPoolSpec(poolSpec, initStorageCluster)
 	}
 
-	// set device class for metadata pool from the default data pool
-	ret.Spec.MetadataPool.DeviceClass = ret.Spec.DataPools[0].PoolSpec.DeviceClass
-	ret.Spec.MetadataPool.EnableCrushUpdates = true
+	// Set default values in the metadata pool spec as necessary
+	setDefaultMetadataPoolSpec(&ret.Spec.MetadataPool.PoolSpec, initStorageCluster)
 
 	err := controllerutil.SetControllerReference(initStorageCluster, ret, r.Scheme)
 	if err != nil {
diff --git a/controllers/storagecluster/cephobjectstores.go b/controllers/storagecluster/cephobjectstores.go
index f178182704..e7563e2a4b 100644
--- a/controllers/storagecluster/cephobjectstores.go
+++ b/controllers/storagecluster/cephobjectstores.go
@@ -168,13 +168,8 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 		},
 		Spec: cephv1.ObjectStoreSpec{
 			PreservePoolsOnDelete: false,
-			DataPool: initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec, // Pass the poolSpec from the storageCluster CR
-			MetadataPool: cephv1.PoolSpec{
-				DeviceClass:        initData.Status.DefaultCephDeviceClass,
-				EnableCrushUpdates: true,
-				FailureDomain:      initData.Status.FailureDomain,
-				Replicated:         generateCephReplicatedSpec(initData, poolTypeMetadata),
-			},
+			DataPool:     initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec,     // Pass the poolSpec from the storageCluster CR
+			MetadataPool: initData.Spec.ManagedResources.CephObjectStores.MetadataPoolSpec, // Pass the poolSpec from the storageCluster CR
 			Gateway: cephv1.GatewaySpec{
 				Port:       80,
 				SecurePort: 443,
@@ -204,9 +199,12 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 		obj.Spec.Gateway.HostNetwork = initData.Spec.ManagedResources.CephObjectStores.HostNetwork
 	}
 
-	// Set default values in the poolSpec as necessary
+	// Set default values in the data pool spec as necessary
 	setDefaultDataPoolSpec(&obj.Spec.DataPool, initData)
 
+	// Set default values in the metadata pool spec as necessary
+	setDefaultMetadataPoolSpec(&obj.Spec.MetadataPool, initData)
+
 	// if kmsConfig is not 'nil', add the KMS details to ObjectStore spec
 	if kmsConfigMap != nil {
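
---

Usage sketch (not part of the patch): the snippet below illustrates how the new
metadata poolSpec would be set on a StorageCluster, using the same field paths
the diff reads (Spec.ManagedResources.CephFilesystems.MetadataPoolSpec and
Spec.ManagedResources.CephObjectStores.MetadataPoolSpec). The import paths and
the "ssd" device class are assumptions for illustration, not verified against a
build. Any field left at its zero value is filled in by
setDefaultMetadataPoolSpec during reconciliation; explicitly set fields pass
through untouched.

package main

import (
	"fmt"

	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

func main() {
	sc := &ocsv1.StorageCluster{}

	// Override only the device class for the cephFS metadata pool.
	// FailureDomain and Replicated are left zero-valued, so
	// setDefaultMetadataPoolSpec defaults them from the cluster during
	// reconciliation, while DeviceClass is kept as-is.
	sc.Spec.ManagedResources.CephFilesystems.MetadataPoolSpec = rookCephv1.PoolSpec{
		DeviceClass: "ssd", // assumed example device class
	}

	// Same idea for the object store metadata pool.
	sc.Spec.ManagedResources.CephObjectStores.MetadataPoolSpec = rookCephv1.PoolSpec{
		DeviceClass: "ssd",
	}

	fmt.Println(sc.Spec.ManagedResources.CephFilesystems.MetadataPoolSpec.DeviceClass) // "ssd"
}

In StorageCluster YAML the same settings would sit under spec.managedResources,
with key names following the fields' JSON tags in the CRD.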