Revert "add e2e test for deployment replicas syncer" #4726

Merged · 1 commit · Mar 20, 2024
234 changes: 59 additions & 175 deletions test/e2e/deploymentreplicassyncer_test.go
@@ -18,7 +18,6 @@ package e2e

import (
"context"
"sort"
"time"

"github.com/onsi/ginkgo/v2"
@@ -31,196 +30,81 @@ import (
"k8s.io/utils/pointer"

policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/test/e2e/framework"
"github.com/karmada-io/karmada/test/helper"
)

var _ = ginkgo.Describe("deployment replicas syncer testing", func() {
var namespace string
var deploymentName, hpaName, policyName, bindingName string
var deployment *appsv1.Deployment
var hpa *autoscalingv2.HorizontalPodAutoscaler
var policy *policyv1alpha1.PropagationPolicy
var targetClusters []string
var _ = ginkgo.Describe("hpa replicas synchronization testing", func() {
ginkgo.Context("Replicas synchronization testing", func() {
var initReplicas = int32(1)
var policyNamespace, policyName string
var namespace, deploymentName, hpaName string
var deployment *appsv1.Deployment
var hpa *autoscalingv2.HorizontalPodAutoscaler
var policy *policyv1alpha1.PropagationPolicy

ginkgo.BeforeEach(func() {
namespace = testNamespace
deploymentName = deploymentNamePrefix + rand.String(RandomStrLength)
hpaName = deploymentName
policyName = deploymentName
bindingName = names.GenerateBindingName(util.DeploymentKind, deploymentName)

// sort member clusters in increasing order
targetClusters = framework.ClusterNames()[0:2]
sort.Strings(targetClusters)

deployment = helper.NewDeployment(namespace, deploymentName)
hpa = helper.NewHPA(namespace, hpaName, deploymentName)
hpa.Spec.MinReplicas = pointer.Int32(2)
policy = helper.NewPropagationPolicy(namespace, policyName, []policyv1alpha1.ResourceSelector{
{APIVersion: deployment.APIVersion, Kind: deployment.Kind, Name: deployment.Name},
{APIVersion: hpa.APIVersion, Kind: hpa.Kind, Name: hpa.Name},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: targetClusters,
},
})
})

ginkgo.JustBeforeEach(func() {
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
framework.CreateHPA(kubeClient, hpa)

ginkgo.DeferCleanup(func() {
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemoveHPA(kubeClient, namespace, hpa.Name)
framework.WaitDeploymentDisappearOnClusters(targetClusters, deployment.Namespace, deployment.Name)
})
})

ginkgo.Context("when policy is Divided schedule type, each cluster have more that one replica", func() {
ginkgo.BeforeEach(func() {
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
deployment.Spec.Replicas = pointer.Int32(4)
})

// Case 1: Deployment(replicas=4) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=2)
// Expected result: HPA scaling takes effect on the template spec, while manual modification does not.
ginkgo.It("general case combined hpa scaling and manually modify in Divided type", func() {
ginkgo.By("step1: propagate 4 replicas to two clusters", func() {
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
})

ginkgo.By("step2: hpa scale each member cluster replicas from 2 to 3", func() {
framework.UpdateHPAWithMinReplicas(kubeClient, namespace, hpa.Name, 3)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
})

ginkgo.By("step3: manually add deployment template replicas from 6 to 10", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 10)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
})

ginkgo.By("step4: manually decrease deployment template replicas from 6 to 2", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
policyNamespace = testNamespace
namespace = testNamespace
policyName = deploymentNamePrefix + rand.String(RandomStrLength)
deploymentName = policyName
hpaName = policyName

deployment = helper.NewDeployment(namespace, deploymentName)
deployment.Spec.Replicas = pointer.Int32(initReplicas)
hpa = helper.NewHPA(namespace, hpaName, deploymentName)
hpa.Spec.MinReplicas = pointer.Int32(2)

policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
{
APIVersion: deployment.APIVersion,
Kind: deployment.Kind,
Name: deployment.Name,
},
{
APIVersion: hpa.APIVersion,
Kind: hpa.Kind,
Name: hpa.Name,
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: framework.ClusterNames(),
},
})
})
})

ginkgo.Context("when policy is Divided schedule type, one cluster have no replica", func() {
ginkgo.BeforeEach(func() {
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
deployment.Spec.Replicas = pointer.Int32(1)
hpa.Spec.MinReplicas = pointer.Int32(1)
})

// Case 2: Deployment(replicas=1) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=1)
// Expected result: manual modification takes effect on the template spec.
ginkgo.It("0/1 case, manually modify replicas from 1 to 2", func() {
ginkgo.By("step1: propagate 1 replicas to two clusters", func() {
assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
})

ginkgo.By("step2: manually add deployment template replicas from 1 to 2", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{1, 1})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
framework.CreateHPA(kubeClient, hpa)
ginkgo.DeferCleanup(func() {
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemoveHPA(kubeClient, namespace, hpa.Name)
framework.WaitDeploymentDisappearOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)
})
})
})

ginkgo.Context("when policy is Divided schedule type, remove one cluster's replicas", func() {
ginkgo.BeforeEach(func() {
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
deployment.Spec.Replicas = pointer.Int32(2)
hpa.Spec.MinReplicas = pointer.Int32(1)
})

// Case 3: Deployment(replicas=2) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=1)
// Expected result: manual modification takes effect on the template spec.
ginkgo.It("0/1 case, manually modify replicas from 2 to 1", func() {
ginkgo.By("step1: propagate 2 replicas to two clusters", func() {
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{1, 1})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
})

ginkgo.By("step2: manually add deployment template replicas from 2 to 1", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 1)
framework.WaitResourceBindingFitWith(karmadaClient, namespace, bindingName, func(rb *workv1alpha2.ResourceBinding) bool {
return len(rb.Status.AggregatedStatus) == 1
ginkgo.It("deployment has been scaled up and synchronized to Karmada", func() {
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
func(deployment *appsv1.Deployment) bool {
return true
})
assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
})
})
})

ginkgo.Context("when policy is Duplicated schedule type", func() {
ginkgo.BeforeEach(func() {
deployment.Spec.Replicas = pointer.Int32(2)
})

// Case 4: Deployment(replicas=2) | Policy(Duplicated, two clusters) | HPA(minReplicas=2)
// Expected result: HPA scaling does not take effect on the template spec; manual modifications are applied as-is (no sync action).
ginkgo.It("general case combined hpa scaling and manually modify in Duplicated type", func() {
ginkgo.By("step1: propagate each 2 replicas to two clusters", func() {
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
})

ginkgo.By("step2: hpa scale each member cluster replicas from 2 to 3", func() {
framework.UpdateHPAWithMinReplicas(kubeClient, namespace, hpa.Name, 3)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
})

ginkgo.By("step3: manually add deployment template replicas from 2 to 4", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 4)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
})
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
func(deployment *appsv1.Deployment) bool {
return *deployment.Spec.Replicas == initReplicas
})

ginkgo.By("step4: manually decrease deployment template replicas from 2 to 1", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 1)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
})
expectedReplicas := initReplicas
gomega.Eventually(func() bool {
deploymentExist, err := kubeClient.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
if err != nil {
return false
}
klog.Infof("got: %d, expect: %d", *deploymentExist.Spec.Replicas, expectedReplicas)
return (*deploymentExist.Spec.Replicas == expectedReplicas) && (deploymentExist.Generation == deploymentExist.Status.ObservedGeneration)
}, time.Minute, pollInterval).Should(gomega.Equal(true))
})
})
})

// assertDeploymentWorkloadReplicas asserts that the replicas in each member cluster eventually equal @expectedReplicas
func assertDeploymentWorkloadReplicas(namespace, name string, clusters []string, expectedReplicas []int32) {
gomega.Expect(len(clusters)).Should(gomega.Equal(len(expectedReplicas)))
for i, cluster := range clusters {
if expectedReplicas[i] == 0 {
framework.WaitDeploymentDisappearOnCluster(cluster, namespace, name)
// keep checking the remaining clusters instead of returning early
continue
}
framework.WaitDeploymentPresentOnClustersFitWith([]string{cluster}, namespace, name, func(deployment *appsv1.Deployment) bool {
klog.Infof("in %s cluster, got: %d, expect: %d", cluster, *deployment.Spec.Replicas, expectedReplicas[i])
return *deployment.Spec.Replicas == expectedReplicas[i]
})
}
}

// assertDeploymentTemplateReplicas asserts that the template spec replicas eventually equal @expectedSpecReplicas
func assertDeploymentTemplateReplicas(namespace, name string, expectedSpecReplicas int32) {
gomega.Eventually(func() bool {
deploymentExist, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false
}
klog.Infof("template spec replicas, got: %d, expect: %d", *deploymentExist.Spec.Replicas, expectedSpecReplicas)
return (*deploymentExist.Spec.Replicas == expectedSpecReplicas) && (deploymentExist.Generation == deploymentExist.Status.ObservedGeneration)
}, time.Minute, pollInterval).Should(gomega.Equal(true))
}
12 changes: 0 additions & 12 deletions test/e2e/framework/hpa.go
@@ -42,15 +42,3 @@ func RemoveHPA(client kubernetes.Interface, namespace, name string) {
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

// UpdateHPAWithMinReplicas updates the HPA's minReplicas.
func UpdateHPAWithMinReplicas(client kubernetes.Interface, namespace, name string, minReplicas int32) {
ginkgo.By(fmt.Sprintf("Updating HPA(%s/%s)", namespace, name), func() {
newHPA, err := client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

newHPA.Spec.MinReplicas = &minReplicas
_, err = client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Update(context.TODO(), newHPA, metav1.UpdateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
21 changes: 0 additions & 21 deletions test/helper/policy.go
@@ -17,7 +17,6 @@ limitations under the License.
package helper

import (
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -83,26 +82,6 @@ func NewExplicitPriorityClusterPropagationPolicy(policyName string, rsSelectors
}
}

// NewStaticWeightPolicyStrategy creates a static-weight replica scheduling strategy with the given weights.
// e.g.: @clusters=[member1, member2], @weights=[1, 1] means static weight `member1:member2=1:1`
func NewStaticWeightPolicyStrategy(clusters []string, weights []int64) *policyv1alpha1.ReplicaSchedulingStrategy {
gomega.Expect(len(clusters)).Should(gomega.Equal(len(weights)))
staticWeightList := make([]policyv1alpha1.StaticClusterWeight, 0)
for i, clusterName := range clusters {
staticWeightList = append(staticWeightList, policyv1alpha1.StaticClusterWeight{
TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{clusterName}},
Weight: weights[i],
})
}
return &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
WeightPreference: &policyv1alpha1.ClusterPreferences{
StaticWeightList: staticWeightList,
},
}
}

// NewOverridePolicy will build a OverridePolicy object.
func NewOverridePolicy(namespace, policyName string, rsSelectors []policyv1alpha1.ResourceSelector, clusterAffinity policyv1alpha1.ClusterAffinity, overriders policyv1alpha1.Overriders) *policyv1alpha1.OverridePolicy {
return &policyv1alpha1.OverridePolicy{
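
Note for readers of this revert: the NewStaticWeightPolicyStrategy helper removed above is what the deleted e2e cases used to divide replicas across member clusters. Below is a minimal usage sketch, not code from this PR — buildDividedPolicy is a hypothetical wrapper, the cluster list is illustrative, and exactly two clusters are assumed to match the 1:1 weights used by the deleted tests:

package e2e

import (
	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	"github.com/karmada-io/karmada/test/helper"
)

// buildDividedPolicy (hypothetical) assembles a PropagationPolicy that divides a
// Deployment's replicas 1:1 across two member clusters, as the deleted cases did.
func buildDividedPolicy(namespace, name string, clusters []string) *policyv1alpha1.PropagationPolicy {
	policy := helper.NewPropagationPolicy(namespace, name, []policyv1alpha1.ResourceSelector{
		{APIVersion: "apps/v1", Kind: "Deployment", Name: name},
	}, policyv1alpha1.Placement{
		ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: clusters},
	})
	// The removed helper requires len(clusters) == len(weights); weights {1, 1}
	// divide evenly, e.g. 4 template replicas become 2 per cluster.
	policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(clusters, []int64{1, 1})
	return policy
}

The deleted assertion helpers then verified both directions of the sync: the per-cluster spec.replicas in each member cluster, and the aggregated value written back to the template by the replicas syncer.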