Allow poolSpec for default block, file, object pools via StorageCluster
This change enables configuring the poolSpec for the default cephBlockPool,
CephFS data pool, and cephObjectStore data pool through the StorageCluster CR.
The poolSpec is passed through directly to the Ceph CRs; only the required
fields are defaulted when left unset, and all other fields remain untouched.

Most fields, including target_size_ratio, are now configurable, giving
users more control over default pool configurations.

Signed-off-by: Malay Kumar Parida <[email protected]>
malayparida2000 committed Feb 7, 2025
1 parent 001b828 commit 427385d
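
For illustration only (not part of this commit), the sketch below shows how the new pool specs could be populated on a StorageCluster object in Go, using the field paths that appear in the diffs that follow. The cephv1/ocsv1 aliases are assumed to be the ones used in the changed files; the configurePools helper, the 0.8 ratio, and the "ssd" device class are hypothetical.

// Hypothetical helper, assuming the ocsv1/cephv1 aliases used in the diffed files.
func configurePools(sc *ocsv1.StorageCluster) {
	// Default block pool: override only target_size_ratio; setDefaultDataPoolSpec
	// later fills DeviceClass, FailureDomain, replica count, etc. for unset fields.
	sc.Spec.ManagedResources.CephBlockPools.PoolSpec = cephv1.PoolSpec{
		Replicated: cephv1.ReplicatedSpec{TargetSizeRatio: 0.8},
	}
	// Object store data pool: pin a device class; everything else is defaulted.
	sc.Spec.ManagedResources.CephObjectStores.DataPoolSpec = cephv1.PoolSpec{
		DeviceClass: "ssd",
	}
}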
Showing 7 changed files with 55 additions and 52 deletions.
14 changes: 8 additions & 6 deletions controllers/storagecluster/cephblockpools.go
@@ -88,10 +88,12 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
 	}
 
 	_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
-		cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
-		cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
-		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+		// Pass the poolSpec from the storageCluster CR
+		cephBlockPool.Spec.PoolSpec = storageCluster.Spec.ManagedResources.CephBlockPools.PoolSpec
+
+		// Set default values in the poolSpec as necessary
+		setDefaultDataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
 		cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
 
 		// Since provider mode handles mirroring, we only need to handle for internal mode
@@ -151,7 +153,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile
 		cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 		cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
 		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata")
+		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
 		util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")
 
 		return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -199,7 +201,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
 		cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 		cephBlockPool.Spec.EnableCrushUpdates = true
 		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
 		cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
 		util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")
 
4 changes: 2 additions & 2 deletions controllers/storagecluster/cephblockpools_test.go
@@ -157,7 +157,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr
 				DeviceClass: cr.Status.DefaultCephDeviceClass,
 				EnableCrushUpdates: true,
 				FailureDomain: getFailureDomain(cr),
-				Replicated: generateCephReplicatedSpec(cr, "data"),
+				Replicated: generateCephReplicatedSpec(cr, poolTypeData),
 				EnableRBDStats: true,
 			},
 		},
@@ -204,7 +204,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
 			DeviceClass: cr.Status.DefaultCephDeviceClass,
 			EnableCrushUpdates: true,
 			FailureDomain: getFailureDomain(cr),
-			Replicated: generateCephReplicatedSpec(cr, "data"),
+			Replicated: generateCephReplicatedSpec(cr, poolTypeMetadata),
 			EnableRBDStats: true,
 		},
 		Name: ".nfs",
28 changes: 28 additions & 0 deletions controllers/storagecluster/cephcluster.go
@@ -48,6 +48,11 @@ const (
 	diskSpeedFast diskSpeed = "fast"
 )
 
+const (
+	poolTypeData     = "data"
+	poolTypeMetadata = "metadata"
+)
+
 type knownDiskType struct {
 	speed diskSpeed
 	provisioner StorageClassProvisionerType
@@ -1418,3 +1423,26 @@ func isEncrptionSettingUpdated(clusterWideEncrytion bool, existingDeviceSet []ro
 	}
 	return false
 }
+
+// setDefaultDataPoolSpec sets the common pool spec for all data pools as necessary
+func setDefaultDataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
+	poolSpec.EnableCrushUpdates = true
+	if poolSpec.DeviceClass == "" {
+		poolSpec.DeviceClass = sc.Status.DefaultCephDeviceClass
+	}
+	if poolSpec.FailureDomain == "" {
+		poolSpec.FailureDomain = getFailureDomain(sc)
+	}
+	// Set default replication settings if necessary
+	// Always set the default Size & ReplicasPerFailureDomain in arbiter mode
+	defaultReplicatedSpec := generateCephReplicatedSpec(sc, poolTypeData)
+	if poolSpec.Replicated.Size == 0 || arbiterEnabled(sc) {
+		poolSpec.Replicated.Size = defaultReplicatedSpec.Size
+	}
+	if poolSpec.Replicated.ReplicasPerFailureDomain == 0 || arbiterEnabled(sc) {
+		poolSpec.Replicated.ReplicasPerFailureDomain = defaultReplicatedSpec.ReplicasPerFailureDomain
+	}
+	if poolSpec.Replicated.TargetSizeRatio == 0.0 {
+		poolSpec.Replicated.TargetSizeRatio = defaultReplicatedSpec.TargetSizeRatio
+	}
+}
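
As a quick illustration of the defaulting behaviour above (sketch only; the poolSpec and storageCluster variables are assumed to exist in the caller): a pool spec that only sets target_size_ratio keeps that value while the remaining fields are filled from the StorageCluster defaults.

// Sketch, reusing the rookCephv1 alias from cephcluster.go.
poolSpec := rookCephv1.PoolSpec{
	Replicated: rookCephv1.ReplicatedSpec{TargetSizeRatio: 0.8},
}
setDefaultDataPoolSpec(&poolSpec, storageCluster)
// After the call:
//   Replicated.TargetSizeRatio stays 0.8 (user-provided value preserved)
//   DeviceClass   -> storageCluster.Status.DefaultCephDeviceClass
//   FailureDomain -> getFailureDomain(storageCluster)
//   Replicated.Size and ReplicasPerFailureDomain come from
//   generateCephReplicatedSpec(storageCluster, poolTypeData); in arbiter
//   mode they are always overwritten with those defaults.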
38 changes: 4 additions & 34 deletions controllers/storagecluster/cephfilesystem.go
@@ -31,7 +31,7 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		Spec: cephv1.FilesystemSpec{
 			MetadataPool: cephv1.NamedPoolSpec{
 				PoolSpec: cephv1.PoolSpec{
-					Replicated: generateCephReplicatedSpec(initStorageCluster, "metadata"),
+					Replicated: generateCephReplicatedSpec(initStorageCluster, poolTypeMetadata),
 					FailureDomain: initStorageCluster.Status.FailureDomain,
 				}},
 			MetadataServer: cephv1.MetadataServerSpec{
@@ -56,30 +56,10 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 	// Append additional pools from specified additional data pools
 	ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...)
 
-	// Iterate over each pool and set default values if necessary
-	defaultPoolSpec := generateDefaultPoolSpec(initStorageCluster)
 	for i := range ret.Spec.DataPools {
-		pool := &ret.Spec.DataPools[i]
-		// Set default device class if not specified
-		if pool.PoolSpec.DeviceClass == "" {
-			pool.PoolSpec.DeviceClass = defaultPoolSpec.DeviceClass
-		}
-		// Set EnableCrushUpdates to always be true
-		pool.PoolSpec.EnableCrushUpdates = true
-		// Set default replication settings if not specified
-		if pool.PoolSpec.Replicated.Size == 0 {
-			pool.PoolSpec.Replicated.Size = defaultPoolSpec.Replicated.Size
-		}
-		if pool.PoolSpec.Replicated.ReplicasPerFailureDomain == 0 {
-			pool.PoolSpec.Replicated.ReplicasPerFailureDomain = defaultPoolSpec.Replicated.ReplicasPerFailureDomain
-		}
-		if pool.PoolSpec.Replicated.TargetSizeRatio == 0 {
-			pool.PoolSpec.Replicated.TargetSizeRatio = defaultPoolSpec.Replicated.TargetSizeRatio
-		}
-		// Set default failure domain if not specified
-		if pool.PoolSpec.FailureDomain == "" {
-			pool.PoolSpec.FailureDomain = defaultPoolSpec.FailureDomain
-		}
+		poolSpec := &ret.Spec.DataPools[i].PoolSpec
+		// Set default values in the poolSpec as necessary
+		setDefaultDataPoolSpec(poolSpec, initStorageCluster)
 	}
 
 	// set device class for metadata pool from the default data pool
@@ -282,13 +262,3 @@ func getActiveMetadataServers(sc *ocsv1.StorageCluster) int {
 
 	return defaults.CephFSActiveMetadataServers
 }
-
-// Define a function to generate default pool specifications
-func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
-	return cephv1.PoolSpec{
-		DeviceClass: sc.Status.DefaultCephDeviceClass,
-		EnableCrushUpdates: true,
-		Replicated: generateCephReplicatedSpec(sc, "data"),
-		FailureDomain: sc.Status.FailureDomain,
-	}
-}
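
For context, the AdditionalDataPools field referenced above takes extra CephFS data pools straight from the StorageCluster CR, and each entry now goes through the same defaulting loop. A sketch (illustrative only; the "archive" pool name and "hdd" device class are hypothetical, and the sc/cephv1 identifiers are the aliases assumed from cephfilesystem.go):

sc.Spec.ManagedResources.CephFilesystems.AdditionalDataPools = []cephv1.NamedPoolSpec{
	{
		Name: "archive",
		PoolSpec: cephv1.PoolSpec{
			DeviceClass: "hdd", // unset fields are filled by setDefaultDataPoolSpec
		},
	},
}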
8 changes: 7 additions & 1 deletion controllers/storagecluster/cephfilesystem_test.go
@@ -139,7 +139,13 @@ func TestCephFileSystemDataPools(t *testing.T) {
 	mocksc := &api.StorageCluster{}
 	mockStorageCluster.DeepCopyInto(mocksc)
 	mocksc.Status.FailureDomain = "zone"
-	defaultPoolSpec := generateDefaultPoolSpec(mocksc)
+	defaultPoolSpec := cephv1.PoolSpec{
+		EnableCrushUpdates: true,
+		DeviceClass: mocksc.Status.DefaultCephDeviceClass,
+		FailureDomain: getFailureDomain(mocksc),
+		Replicated: generateCephReplicatedSpec(mocksc, poolTypeData),
+	}
+
 	var cases = []struct {
 		label string
 		sc *api.StorageCluster
12 changes: 5 additions & 7 deletions controllers/storagecluster/cephobjectstores.go
@@ -168,17 +168,12 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 			},
 			Spec: cephv1.ObjectStoreSpec{
 				PreservePoolsOnDelete: false,
-				DataPool: cephv1.PoolSpec{
-					DeviceClass: initData.Status.DefaultCephDeviceClass,
-					EnableCrushUpdates: true,
-					FailureDomain: initData.Status.FailureDomain,
-					Replicated: generateCephReplicatedSpec(initData, "data"),
-				},
+				DataPool: initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec, // Pass the poolSpec from the storageCluster CR
 				MetadataPool: cephv1.PoolSpec{
 					DeviceClass: initData.Status.DefaultCephDeviceClass,
 					EnableCrushUpdates: true,
 					FailureDomain: initData.Status.FailureDomain,
-					Replicated: generateCephReplicatedSpec(initData, "metadata"),
+					Replicated: generateCephReplicatedSpec(initData, poolTypeMetadata),
 				},
 				Gateway: cephv1.GatewaySpec{
 					Port: 80,
@@ -209,6 +204,9 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 		obj.Spec.Gateway.HostNetwork = initData.Spec.ManagedResources.CephObjectStores.HostNetwork
 	}
 
+	// Set default values in the poolSpec as necessary
+	setDefaultDataPoolSpec(&obj.Spec.DataPool, initData)
+
 	// if kmsConfig is not 'nil', add the KMS details to ObjectStore spec
 	if kmsConfigMap != nil {
 
3 changes: 1 addition & 2 deletions controllers/storagecluster/generate.go
@@ -141,8 +141,7 @@ func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string)
 
 	crs.Size = getCephPoolReplicatedSize(initData)
 	crs.ReplicasPerFailureDomain = uint(getReplicasPerFailureDomain(initData))
-	//lint:ignore ST1017 required to compare it directly
-	if "data" == poolType {
+	if poolType == poolTypeData {
 		crs.TargetSizeRatio = .49
 	}
 
