Allow poolSpec for metadata pools of cephFS and objectStore
This change enables configuring the poolSpec for the metadata pools of the
default cephFS & cephObjectStore through the StorageCluster CR.
The poolSpec is passed directly to the Ceph CRs; only the necessary fields
are defaulted when left unset, and the other fields remain untouched.

Signed-off-by: Malay Kumar Parida <[email protected]>
malayparida2000 committed Feb 27, 2025
1 parent 41ac910 commit 02f0317
Showing 4 changed files with 32 additions and 24 deletions.
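
For orientation, here is a minimal, self-contained Go sketch of the StorageCluster fields this commit starts honouring (ManagedResources.CephFilesystems.MetadataPoolSpec and ManagedResources.CephObjectStores.MetadataPoolSpec). The types and values are simplified stand-ins, not the real ocsv1/rook-ceph structs:

// Sketch only: stand-in types; the real PoolSpec comes from rook-ceph's cephv1.
package main

import "fmt"

type ReplicatedSpec struct {
    Size                     uint
    ReplicasPerFailureDomain int
}

type PoolSpec struct {
    DeviceClass   string
    FailureDomain string
    Replicated    ReplicatedSpec
}

type ManagedResources struct {
    CephFilesystems  struct{ MetadataPoolSpec PoolSpec }
    CephObjectStores struct{ MetadataPoolSpec PoolSpec }
}

func main() {
    var mr ManagedResources
    // A user pins only what they care about; anything left empty is filled in
    // later by setDefaultMetadataPoolSpec (added in cephcluster.go below).
    mr.CephFilesystems.MetadataPoolSpec.DeviceClass = "ssd"  // hypothetical value
    mr.CephObjectStores.MetadataPoolSpec.Replicated.Size = 2 // hypothetical value

    fmt.Printf("%+v\n", mr)
}

Only the fields a user sets survive untouched; everything left empty is filled in by the new setDefaultMetadataPoolSpec helper added in cephcluster.go below.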
10 changes: 2 additions & 8 deletions controllers/storagecluster/cephblockpools.go
@@ -150,10 +150,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile

 	_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
 		cephBlockPool.Spec.Name = ".mgr"
-		cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
-		cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
-		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
+		setDefaultMetadataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
 		util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

 		return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -198,10 +195,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile

 	_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
 		cephBlockPool.Spec.Name = ".nfs"
-		cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
-		cephBlockPool.Spec.EnableCrushUpdates = true
-		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
+		setDefaultMetadataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
 		cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
 		util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

20 changes: 20 additions & 0 deletions controllers/storagecluster/cephcluster.go
@@ -1483,6 +1483,26 @@ func isEncrptionSettingUpdated(clusterWideEncrytion bool, existingDeviceSet []ro
 	return false
 }

+// setDefaultMetadataPoolSpec sets the common pool spec for all metadata pools as necessary
+func setDefaultMetadataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
+	poolSpec.EnableCrushUpdates = true
+	if poolSpec.DeviceClass == "" {
+		poolSpec.DeviceClass = sc.Status.DefaultCephDeviceClass
+	}
+	if poolSpec.FailureDomain == "" {
+		poolSpec.FailureDomain = getFailureDomain(sc)
+	}
+	// Set default replication settings if necessary
+	// Always set the default Size & ReplicasPerFailureDomain in arbiter mode
+	defaultReplicatedSpec := generateCephReplicatedSpec(sc, poolTypeMetadata)
+	if poolSpec.Replicated.Size == 0 || arbiterEnabled(sc) {
+		poolSpec.Replicated.Size = defaultReplicatedSpec.Size
+	}
+	if poolSpec.Replicated.ReplicasPerFailureDomain == 0 || arbiterEnabled(sc) {
+		poolSpec.Replicated.ReplicasPerFailureDomain = defaultReplicatedSpec.ReplicasPerFailureDomain
+	}
+}
+
 // setDefaultDataPoolSpec sets the common pool spec for all data pools as necessary
 func setDefaultDataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
 	poolSpec.EnableCrushUpdates = true
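
To make the defaulting rules concrete, here is a runnable sketch of the same logic with the operator helpers (getFailureDomain, generateCephReplicatedSpec, arbiterEnabled) replaced by plain parameters and stand-in types; the concrete values are illustrative, not the operator's actual defaults:

// Sketch only: stand-in types; operator helpers replaced by plain parameters.
package main

import "fmt"

type ReplicatedSpec struct {
    Size                     uint
    ReplicasPerFailureDomain int
}

type PoolSpec struct {
    EnableCrushUpdates bool
    DeviceClass        string
    FailureDomain      string
    Replicated         ReplicatedSpec
}

// setDefaultMetadataPoolSpec mirrors the new helper above: fill only empty
// fields, but in arbiter mode always force the default replication values.
func setDefaultMetadataPoolSpec(p *PoolSpec, defaultClass, defaultDomain string, def ReplicatedSpec, arbiter bool) {
    p.EnableCrushUpdates = true
    if p.DeviceClass == "" {
        p.DeviceClass = defaultClass
    }
    if p.FailureDomain == "" {
        p.FailureDomain = defaultDomain
    }
    if p.Replicated.Size == 0 || arbiter {
        p.Replicated.Size = def.Size
    }
    if p.Replicated.ReplicasPerFailureDomain == 0 || arbiter {
        p.Replicated.ReplicasPerFailureDomain = def.ReplicasPerFailureDomain
    }
}

func main() {
    // The user set only a device class via the StorageCluster CR (hypothetical value).
    user := PoolSpec{DeviceClass: "nvme"}
    setDefaultMetadataPoolSpec(&user, "ssd", "host", ReplicatedSpec{Size: 3, ReplicasPerFailureDomain: 1}, false)
    fmt.Printf("%+v\n", user) // DeviceClass stays "nvme"; the rest is defaulted
}

The design point: user-set fields win, except in arbiter mode, where the replica Size and ReplicasPerFailureDomain are always forced back to the computed defaults.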
12 changes: 4 additions & 8 deletions controllers/storagecluster/cephfilesystem.go
@@ -30,10 +30,8 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		},
 		Spec: cephv1.FilesystemSpec{
 			MetadataPool: cephv1.NamedPoolSpec{
-				PoolSpec: cephv1.PoolSpec{
-					Replicated:    generateCephReplicatedSpec(initStorageCluster, poolTypeMetadata),
-					FailureDomain: initStorageCluster.Status.FailureDomain,
-				}},
+				PoolSpec: initStorageCluster.Spec.ManagedResources.CephFilesystems.MetadataPoolSpec, // Pass the poolSpec from the storageCluster CR
+			},
 			MetadataServer: cephv1.MetadataServerSpec{
 				ActiveCount:   int32(getActiveMetadataServers(initStorageCluster)),
 				ActiveStandby: true,
@@ -55,16 +53,14 @@

 	// Append additional pools from specified additional data pools
 	ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...)
-
 	for i := range ret.Spec.DataPools {
 		poolSpec := &ret.Spec.DataPools[i].PoolSpec
 		// Set default values in the poolSpec as necessary
 		setDefaultDataPoolSpec(poolSpec, initStorageCluster)
 	}

-	// set device class for metadata pool from the default data pool
-	ret.Spec.MetadataPool.DeviceClass = ret.Spec.DataPools[0].PoolSpec.DeviceClass
-	ret.Spec.MetadataPool.EnableCrushUpdates = true
+	// Set default values in the metadata pool spec as necessary
+	setDefaultDataPoolSpec(&ret.Spec.MetadataPool.PoolSpec, initStorageCluster)

 	err := controllerutil.SetControllerReference(initStorageCluster, ret, r.Scheme)
 	if err != nil {
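
The data-pool handling above follows a small pattern worth calling out: build the slice from the default pool plus AdditionalDataPools, then default each entry in place through a pointer into the slice (a plain range copy would not stick). A stand-in sketch, with setDefaultDataPoolSpec stubbed since its full body is outside this diff:

// Sketch only: stand-in types; setDefaultDataPoolSpec stubbed to "fill empty fields".
package main

import "fmt"

type PoolSpec struct {
    DeviceClass   string
    FailureDomain string
}

type NamedPoolSpec struct {
    Name     string
    PoolSpec PoolSpec
}

func setDefaultDataPoolSpec(p *PoolSpec, defaultClass, defaultDomain string) {
    if p.DeviceClass == "" {
        p.DeviceClass = defaultClass
    }
    if p.FailureDomain == "" {
        p.FailureDomain = defaultDomain
    }
}

func main() {
    dataPools := []NamedPoolSpec{{Name: "default-data"}}                                     // stand-in for the default data pool
    additional := []NamedPoolSpec{{Name: "archive", PoolSpec: PoolSpec{DeviceClass: "hdd"}}} // hypothetical AdditionalDataPools entry
    dataPools = append(dataPools, additional...)

    for i := range dataPools {
        // Pointer into the slice, so the defaults are applied in place.
        setDefaultDataPoolSpec(&dataPools[i].PoolSpec, "ssd", "host")
    }
    fmt.Printf("%+v\n", dataPools)
}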
14 changes: 6 additions & 8 deletions controllers/storagecluster/cephobjectstores.go
@@ -168,13 +168,8 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 		},
 		Spec: cephv1.ObjectStoreSpec{
 			PreservePoolsOnDelete: false,
-			DataPool: initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec, // Pass the poolSpec from the storageCluster CR
-			MetadataPool: cephv1.PoolSpec{
-				DeviceClass:        initData.Status.DefaultCephDeviceClass,
-				EnableCrushUpdates: true,
-				FailureDomain:      initData.Status.FailureDomain,
-				Replicated:         generateCephReplicatedSpec(initData, poolTypeMetadata),
-			},
+			DataPool:     initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec,     // Pass the poolSpec from the storageCluster CR
+			MetadataPool: initData.Spec.ManagedResources.CephObjectStores.MetadataPoolSpec, // Pass the poolSpec from the storageCluster CR
 			Gateway: cephv1.GatewaySpec{
 				Port:       80,
 				SecurePort: 443,
@@ -204,9 +199,12 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 		obj.Spec.Gateway.HostNetwork = initData.Spec.ManagedResources.CephObjectStores.HostNetwork
 	}

-	// Set default values in the poolSpec as necessary
+	// Set default values in the data pool spec as necessary
 	setDefaultDataPoolSpec(&obj.Spec.DataPool, initData)

+	// Set default values in the metadata pool spec as necessary
+	setDefaultMetadataPoolSpec(&obj.Spec.MetadataPool, initData)
+
 	// if kmsConfig is not 'nil', add the KMS details to ObjectStore spec
 	if kmsConfigMap != nil {
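
Finally, a condensed sketch of the object-store flow: both pools are copied verbatim from the StorageCluster CR and then run through their respective defaulters, so an empty CR field simply means "fully operator-defaulted". The helpers are stubbed here with hypothetical defaults; in the operator they also fill the replication settings, which this diff only shows for the metadata variant:

// Sketch only: stand-in types and stubbed defaulters with hypothetical defaults.
package main

import "fmt"

type PoolSpec struct {
    DeviceClass   string
    FailureDomain string
}

type ObjectStoreSpec struct {
    DataPool     PoolSpec
    MetadataPool PoolSpec
}

func fill(p *PoolSpec, class, domain string) {
    if p.DeviceClass == "" {
        p.DeviceClass = class
    }
    if p.FailureDomain == "" {
        p.FailureDomain = domain
    }
}

// Stubs for the operator helpers; the real ones also default replication.
func setDefaultDataPoolSpec(p *PoolSpec)     { fill(p, "ssd", "host") }
func setDefaultMetadataPoolSpec(p *PoolSpec) { fill(p, "ssd", "host") }

func main() {
    // Both pools come verbatim from the StorageCluster CR; MetadataPool is left empty here.
    spec := ObjectStoreSpec{DataPool: PoolSpec{DeviceClass: "hdd"}}

    setDefaultDataPoolSpec(&spec.DataPool)
    setDefaultMetadataPoolSpec(&spec.MetadataPool)
    fmt.Printf("%+v\n", spec)
}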
