Merge pull request #2939 from Poor12/automated-cherry-pick-of-#2930-upstream-release-1.2

Automated cherry pick of #2930: fix work status not sync to control plane
karmada-bot authored Dec 13, 2022
2 parents fbddb6a + 882f9e4 commit 739d2c3
Showing 5 changed files with 135 additions and 23 deletions.
1 change: 0 additions & 1 deletion pkg/controllers/status/cluster_status_controller.go
@@ -142,7 +142,6 @@ func (c *ClusterStatusController) syncClusterStatus(cluster *clusterv1alpha1.Clu
     if !online && readyCondition.Status != metav1.ConditionTrue {
         klog.V(2).Infof("Cluster(%s) still offline after %s, ensuring offline is set.",
             cluster.Name, c.ClusterFailureThreshold.Duration)
-        c.InformerManager.Stop(cluster.Name)
         setTransitionTime(cluster.Status.Conditions, readyCondition)
         meta.SetStatusCondition(&currentClusterStatus.Conditions, *readyCondition)
         return c.updateStatusIfNeeded(cluster, currentClusterStatus)
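
Why this one-line deletion fixes the bug: stopping the cluster's informer manager while the cluster was marked offline meant that, once the cluster recovered, events from the member cluster were no longer delivered, so Work status was never synced back to the control plane. A minimal sketch of that failure mode, using hypothetical stand-in types rather than Karmada's real informer manager:

package main

import "fmt"

// informerManager is a hypothetical stand-in for Karmada's per-cluster
// informer manager; only the stopped flag matters for this illustration.
type informerManager struct{ stopped bool }

// Stop mimics shutting down all informers for a cluster.
func (m *informerManager) Stop() { m.stopped = true }

// deliver reports whether an event from the member cluster would still
// reach the status controller.
func (m *informerManager) deliver(event string) bool { return !m.stopped }

func main() {
	m := &informerManager{}

	// Old behavior: the controller stopped the informer once the cluster
	// stayed offline past the failure threshold.
	m.Stop()

	// The cluster later recovers and a workload's status changes there...
	if !m.deliver("deployment/nginx: readyReplicas changed") {
		// ...but no event is delivered, so the Work object on the control
		// plane keeps its stale status.
		fmt.Println("event dropped: work status never syncs back")
	}
}
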
8 changes: 6 additions & 2 deletions test/e2e/framework/deployment.go
@@ -84,9 +84,13 @@ func WaitDeploymentDisappearOnClusters(clusters []string, namespace, name string
 // UpdateDeploymentReplicas update deployment's replicas.
 func UpdateDeploymentReplicas(client kubernetes.Interface, deployment *appsv1.Deployment, replicas int32) {
     ginkgo.By(fmt.Sprintf("Updating Deployment(%s/%s)'s replicas to %d", deployment.Namespace, deployment.Name, replicas), func() {
-        deployment.Spec.Replicas = &replicas
         gomega.Eventually(func() error {
-            _, err := client.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
+            deploy, err := client.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            deploy.Spec.Replicas = &replicas
+            _, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
             return err
         }, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
     })
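
The helper now re-reads the Deployment on every Eventually attempt, so an Update with a stale resourceVersion no longer fails with a 409 Conflict for the whole poll window. The same get-mutate-update loop could also be written with client-go's conflict-retry helper; a sketch under the usual client-go imports, not the framework's actual code:

package framework

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateReplicasWithRetry is a hypothetical alternative to the Eventually
// loop above: RetryOnConflict re-runs the closure whenever Update returns
// a conflict, and the fresh Get keeps the resourceVersion current.
func updateReplicasWithRetry(client kubernetes.Interface, namespace, name string, replicas int32) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		deploy, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		deploy.Spec.Replicas = &replicas
		_, err = client.AppsV1().Deployments(namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
		return err
	})
}
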
2 changes: 1 addition & 1 deletion test/e2e/propagationpolicy_test.go
@@ -90,7 +90,7 @@ var _ = ginkgo.Describe("[BasicPropagation] basic propagation testing", func() {
             serviceNamespace = policyNamespace
             serviceName = policyName

-            service = testhelper.NewService(serviceNamespace, serviceName)
+            service = testhelper.NewService(serviceNamespace, serviceName, corev1.ServiceTypeClusterIP)
             policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: service.APIVersion,
143 changes: 126 additions & 17 deletions test/e2e/resource_test.go
@@ -18,11 +18,13 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/klog/v2"

+    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
     policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
     workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
     "github.com/karmada-io/karmada/pkg/util"
     "github.com/karmada-io/karmada/pkg/util/names"
     "github.com/karmada-io/karmada/test/e2e/framework"
-    "github.com/karmada-io/karmada/test/helper"
+    testhelper "github.com/karmada-io/karmada/test/helper"
 )

@@ -46,8 +48,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection testing", func() {
             deploymentNamespace = testNamespace
             deploymentName = policyName

-            deployment = helper.NewDeployment(deploymentNamespace, deploymentName)
-            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName)
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: deployment.APIVersion,
                     Kind: deployment.Kind,

@@ -123,9 +125,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
             serviceNamespace = testNamespace
             serviceName = policyName

-            service = helper.NewService(serviceNamespace, serviceName)
-            service.Spec.Type = corev1.ServiceTypeLoadBalancer
-            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            service = testhelper.NewService(serviceNamespace, serviceName, corev1.ServiceTypeLoadBalancer)
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: service.APIVersion,
                     Kind: service.Kind,

@@ -196,9 +197,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
             serviceNamespace = testNamespace
             serviceName = policyName

-            service = helper.NewService(serviceNamespace, serviceName)
-            service.Spec.Type = corev1.ServiceTypeNodePort
-            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            service = testhelper.NewService(serviceNamespace, serviceName, corev1.ServiceTypeNodePort)
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: service.APIVersion,
                     Kind: service.Kind,

@@ -266,8 +266,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
             ingNamespace = testNamespace
             ingName = policyName

-            ingress = helper.NewIngress(ingNamespace, ingName)
-            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            ingress = testhelper.NewIngress(ingNamespace, ingName)
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: ingress.APIVersion,
                     Kind: ingress.Kind,

@@ -338,8 +338,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
             jobNamespace = testNamespace
             jobName = policyName

-            job = helper.NewJob(jobNamespace, jobName)
-            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            job = testhelper.NewJob(jobNamespace, jobName)
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: job.APIVersion,
                     Kind: job.Kind,

@@ -392,8 +392,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
             daemonSetNamespace = testNamespace
             daemonSetName = policyName

-            daemonSet = helper.NewDaemonSet(daemonSetNamespace, daemonSetName)
-            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            daemonSet = testhelper.NewDaemonSet(daemonSetNamespace, daemonSetName)
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: daemonSet.APIVersion,
                     Kind: daemonSet.Kind,

@@ -479,8 +479,8 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
             statefulSetNamespace = testNamespace
             statefulSetName = policyName

-            statefulSet = helper.NewStatefulSet(statefulSetNamespace, statefulSetName)
-            policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+            statefulSet = testhelper.NewStatefulSet(statefulSetNamespace, statefulSetName)
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
                 {
                     APIVersion: statefulSet.APIVersion,
                     Kind: statefulSet.Kind,
@@ -545,3 +545,112 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
         })
     })
 })
+
+var _ = framework.SerialDescribe("workload status synchronization testing", func() {
+    ginkgo.Context("Deployment status synchronization when cluster failed and recovered soon", func() {
+        var policyNamespace, policyName string
+        var deploymentNamespace, deploymentName string
+        var deployment *appsv1.Deployment
+        var policy *policyv1alpha1.PropagationPolicy
+        var originalReplicas, numOfFailedClusters int
+
+        ginkgo.BeforeEach(func() {
+            policyNamespace = testNamespace
+            policyName = deploymentNamePrefix + rand.String(RandomStrLength)
+            deploymentNamespace = testNamespace
+            deploymentName = policyName
+            deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName)
+            numOfFailedClusters = 1
+            originalReplicas = 3
+
+            policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+                {
+                    APIVersion: deployment.APIVersion,
+                    Kind: deployment.Kind,
+                    Name: deployment.Name,
+                },
+            }, policyv1alpha1.Placement{
+                ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+                    LabelSelector: &metav1.LabelSelector{
+                        // only test push mode clusters
+                        // because pull mode clusters cannot be disabled by changing APIEndpoint
+                        MatchLabels: pushModeClusterLabels,
+                    },
+                },
+                ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+                    ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+                },
+            })
+        })
+
+        ginkgo.BeforeEach(func() {
+            framework.CreatePropagationPolicy(karmadaClient, policy)
+            framework.CreateDeployment(kubeClient, deployment)
+            ginkgo.DeferCleanup(func() {
+                framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
+                framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
+            })
+        })
+
+        ginkgo.It("deployment status synchronization testing", func() {
+            var disabledClusters []string
+            targetClusterNames := framework.ExtractTargetClustersFrom(controlPlaneClient, deployment)
+
+            ginkgo.By("set one cluster condition status to false", func() {
+                temp := numOfFailedClusters
+                for _, targetClusterName := range targetClusterNames {
+                    if temp > 0 {
+                        klog.Infof("Set cluster %s to disable.", targetClusterName)
+                        err := disableCluster(controlPlaneClient, targetClusterName)
+                        gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+                        // wait for the current cluster status changing to false
+                        framework.WaitClusterFitWith(controlPlaneClient, targetClusterName, func(cluster *clusterv1alpha1.Cluster) bool {
+                            return !util.IsClusterReady(&cluster.Status)
+                        })
+                        disabledClusters = append(disabledClusters, targetClusterName)
+                        temp--
+                    }
+                }
+            })
+
+            ginkgo.By("recover not ready cluster", func() {
+                for _, disabledCluster := range disabledClusters {
+                    fmt.Printf("cluster %s is waiting for recovering\n", disabledCluster)
+                    originalAPIEndpoint := getClusterAPIEndpoint(disabledCluster)
+
+                    err := recoverCluster(controlPlaneClient, disabledCluster, originalAPIEndpoint)
+                    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+                    // wait for the disabled cluster recovered
+                    gomega.Eventually(func(g gomega.Gomega) (bool, error) {
+                        currentCluster, err := util.GetCluster(controlPlaneClient, disabledCluster)
+                        g.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+                        if util.IsClusterReady(&currentCluster.Status) {
+                            fmt.Printf("cluster %s recovered\n", disabledCluster)
+                            return true, nil
+                        }
+                        return false, nil
+                    }, pollTimeout, pollInterval).Should(gomega.Equal(true))
+                }
+            })
+
+            ginkgo.By("edit deployment in disabled cluster", func() {
+                for _, disabledCluster := range disabledClusters {
+                    clusterClient := framework.GetClusterClient(disabledCluster)
+                    framework.UpdateDeploymentReplicas(clusterClient, deployment, updateDeploymentReplicas)
+                    // wait for the status synchronization
+                    gomega.Eventually(func(g gomega.Gomega) (bool, error) {
+                        currentDeployment, err := clusterClient.AppsV1().Deployments(testNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+                        g.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+                        if *currentDeployment.Spec.Replicas == int32(originalReplicas) {
+                            return true, nil
+                        }
+                        return false, nil
+                    }, pollTimeout, pollInterval).Should(gomega.Equal(true))
+                }
+            })
+        })
+    })
+})
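
This new test exercises exactly the path touched in cluster_status_controller.go: disable a push-mode cluster, let it recover, then confirm the control plane reasserts the desired replica count over a manual edit in the member cluster. disableCluster, recoverCluster, and getClusterAPIEndpoint are suite helpers whose bodies are outside this diff; a purely hypothetical sketch of the disable side, assuming controlPlaneClient is a controller-runtime client and that unreadiness is induced by breaking Cluster.Spec.APIEndpoint (as the in-test comment about push-mode clusters suggests):

package e2e

import (
	"context"

	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
)

// disableCluster, hypothetical sketch only: make the cluster unready by
// rewriting its API endpoint to an address nothing listens on. The real
// helper (and recoverCluster, which restores the saved endpoint) lives
// elsewhere in the e2e suite.
func disableCluster(c client.Client, clusterName string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cluster := &clusterv1alpha1.Cluster{}
		if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, cluster); err != nil {
			return err
		}
		// An unreachable endpoint makes the cluster-status controller mark
		// the cluster NotReady on its next health probe.
		cluster.Spec.APIEndpoint = "https://10.255.255.1:6443" // assumed-unreachable address
		return c.Update(context.TODO(), cluster)
	})
}
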
4 changes: 2 additions & 2 deletions test/helper/resource.go
@@ -128,7 +128,7 @@ func NewStatefulSet(namespace string, name string) *appsv1.StatefulSet {
 }

 // NewService will build a service object.
-func NewService(namespace string, name string) *corev1.Service {
+func NewService(namespace string, name string, svcType corev1.ServiceType) *corev1.Service {
     return &corev1.Service{
         TypeMeta: metav1.TypeMeta{
             APIVersion: "v1",

@@ -139,6 +139,7 @@ func NewService(namespace string, name string) *corev1.Service {
             Name: name,
         },
         Spec: corev1.ServiceSpec{
+            Type: svcType,
             Ports: []corev1.ServicePort{
                 {
                     Name: "http",

@@ -147,7 +148,6 @@ func NewService(namespace string, name string) *corev1.Service {
                     TargetPort: intstr.IntOrString{IntVal: 8080},
                 },
             },
-            Type: corev1.ServiceTypeClusterIP,
         },
     }
 }
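
With the Service type now a constructor argument, call sites state it explicitly instead of mutating Spec.Type after construction, which is what the propagationpolicy_test.go and resource_test.go changes above do. A small usage example with hypothetical namespace and name values:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	testhelper "github.com/karmada-io/karmada/test/helper"
)

func main() {
	// The type is chosen at construction time: ClusterIP, NodePort,
	// or LoadBalancer, matching what each test needs.
	svc := testhelper.NewService("demo-ns", "demo-svc", corev1.ServiceTypeNodePort)
	fmt.Println(svc.Spec.Type) // NodePort
}
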
