Skip to content

Commit

Permalink
Merge pull request #3066 from malayparida2000/metadat_pool
Browse files Browse the repository at this point in the history
Allow specifying poolSpec for metadata pools of cephFS and cephObjectStore
  • Loading branch information
openshift-merge-bot[bot] authored Feb 27, 2025
2 parents 6fb4e84 + 02f0317 commit a22914a
Show file tree
Hide file tree
Showing 13 changed files with 1,322 additions and 24 deletions.
4 changes: 4 additions & 0 deletions api/v1/storagecluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,8 @@ type ManageCephFilesystems struct {
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
StorageClassName string `json:"storageClassName,omitempty"`
// MetadataPoolSpec specifies the pool specification for the default cephFS metadata pool
MetadataPoolSpec rookCephv1.PoolSpec `json:"metadataPoolSpec,omitempty"`
// DataPoolSpec specifies the pool specification for the default cephfs data pool
DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"`
// AdditionalDataPools specifies list of additional named cephfs data pools
Expand All @@ -292,6 +294,8 @@ type ManageCephObjectStores struct {
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
StorageClassName string `json:"storageClassName,omitempty"`
// MetadataPoolSpec specifies the pool specification for the default cephObjectStore metadata pool
MetadataPoolSpec rookCephv1.PoolSpec `json:"metadataPoolSpec,omitempty"`
// DataPoolSpec specifies the pool specification for the default cephObjectStore data pool
DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"`
}
Expand Down
2 changes: 2 additions & 0 deletions api/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

424 changes: 424 additions & 0 deletions config/crd/bases/ocs.openshift.io_storageclusters.yaml

Large diffs are not rendered by default.

10 changes: 2 additions & 8 deletions controllers/storagecluster/cephblockpools.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,10 +150,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
cephBlockPool.Spec.Name = ".mgr"
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
setDefaultMetadataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
Expand Down Expand Up @@ -198,10 +195,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
cephBlockPool.Spec.Name = ".nfs"
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
setDefaultMetadataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

Expand Down
20 changes: 20 additions & 0 deletions controllers/storagecluster/cephcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -1483,6 +1483,26 @@ func isEncrptionSettingUpdated(clusterWideEncrytion bool, existingDeviceSet []ro
return false
}

// setDefaultMetadataPoolSpec fills in any unset fields of a metadata PoolSpec
// with cluster-derived defaults taken from the given StorageCluster.
// Replication defaults are always (re)applied when arbiter mode is enabled.
func setDefaultMetadataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
	// CRUSH updates are unconditionally enabled for metadata pools.
	poolSpec.EnableCrushUpdates = true

	// Only fill in device class and failure domain when the user left them empty.
	if len(poolSpec.DeviceClass) == 0 {
		poolSpec.DeviceClass = sc.Status.DefaultCephDeviceClass
	}
	if len(poolSpec.FailureDomain) == 0 {
		poolSpec.FailureDomain = getFailureDomain(sc)
	}

	// In arbiter mode the default Size and ReplicasPerFailureDomain always win;
	// otherwise defaults apply only when the corresponding field is zero.
	defaults := generateCephReplicatedSpec(sc, poolTypeMetadata)
	forceDefaults := arbiterEnabled(sc)
	if forceDefaults || poolSpec.Replicated.Size == 0 {
		poolSpec.Replicated.Size = defaults.Size
	}
	if forceDefaults || poolSpec.Replicated.ReplicasPerFailureDomain == 0 {
		poolSpec.Replicated.ReplicasPerFailureDomain = defaults.ReplicasPerFailureDomain
	}
}

// setDefaultDataPoolSpec sets the common pool spec for all data pools as necessary
func setDefaultDataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
poolSpec.EnableCrushUpdates = true
Expand Down
12 changes: 4 additions & 8 deletions controllers/storagecluster/cephfilesystem.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,8 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
},
Spec: cephv1.FilesystemSpec{
MetadataPool: cephv1.NamedPoolSpec{
PoolSpec: cephv1.PoolSpec{
Replicated: generateCephReplicatedSpec(initStorageCluster, poolTypeMetadata),
FailureDomain: initStorageCluster.Status.FailureDomain,
}},
PoolSpec: initStorageCluster.Spec.ManagedResources.CephFilesystems.MetadataPoolSpec, // Pass the poolSpec from the storageCluster CR
},
MetadataServer: cephv1.MetadataServerSpec{
ActiveCount: int32(getActiveMetadataServers(initStorageCluster)),
ActiveStandby: true,
Expand All @@ -55,16 +53,14 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster

// Append additional pools from specified additional data pools
ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...)

for i := range ret.Spec.DataPools {
poolSpec := &ret.Spec.DataPools[i].PoolSpec
// Set default values in the poolSpec as necessary
setDefaultDataPoolSpec(poolSpec, initStorageCluster)
}

// set device class for metadata pool from the default data pool
ret.Spec.MetadataPool.DeviceClass = ret.Spec.DataPools[0].PoolSpec.DeviceClass
ret.Spec.MetadataPool.EnableCrushUpdates = true
// Set default values in the metadata pool spec as necessary
setDefaultDataPoolSpec(&ret.Spec.MetadataPool.PoolSpec, initStorageCluster)

err := controllerutil.SetControllerReference(initStorageCluster, ret, r.Scheme)
if err != nil {
Expand Down
14 changes: 6 additions & 8 deletions controllers/storagecluster/cephobjectstores.go
Original file line number Diff line number Diff line change
Expand Up @@ -168,13 +168,8 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
},
Spec: cephv1.ObjectStoreSpec{
PreservePoolsOnDelete: false,
DataPool: initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec, // Pass the poolSpec from the storageCluster CR
MetadataPool: cephv1.PoolSpec{
DeviceClass: initData.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: initData.Status.FailureDomain,
Replicated: generateCephReplicatedSpec(initData, poolTypeMetadata),
},
DataPool: initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec, // Pass the poolSpec from the storageCluster CR
MetadataPool: initData.Spec.ManagedResources.CephObjectStores.MetadataPoolSpec, // Pass the poolSpec from the storageCluster CR
Gateway: cephv1.GatewaySpec{
Port: 80,
SecurePort: 443,
Expand Down Expand Up @@ -204,9 +199,12 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
obj.Spec.Gateway.HostNetwork = initData.Spec.ManagedResources.CephObjectStores.HostNetwork
}

// Set default values in the poolSpec as necessary
// Set default values in the data pool spec as necessary
setDefaultDataPoolSpec(&obj.Spec.DataPool, initData)

// Set default values in the metadata pool spec as necessary
setDefaultMetadataPoolSpec(&obj.Spec.MetadataPool, initData)

// if kmsConfig is not 'nil', add the KMS details to ObjectStore spec
if kmsConfigMap != nil {

Expand Down
Loading

0 comments on commit a22914a

Please sign in to comment.