From 463f76765d843e512703e3029f0ecca2aed6e053 Mon Sep 17 00:00:00 2001 From: lihongyan1 Date: Wed, 28 Feb 2024 17:01:21 +0800 Subject: [PATCH] test: stabilize case single_prometheus_replica_has_no_pdb (#425) * test: stabilize monitoringstack cases Refactor code and also enhance the following cases TestMonitoringStackController/single_prometheus_replica_has_no_pdb TestMonitoringStackController/Alertmanager_disabled TestMonitoringStackController/Verify_ability_to_scale_down_Prometheus * test: fixed review comments --- test/e2e/framework/monitoring_stack.go | 47 ++++++++++++++++++++ test/e2e/monitoring_stack_controller_test.go | 25 +++-------- 2 files changed, 53 insertions(+), 19 deletions(-) create mode 100644 test/e2e/framework/monitoring_stack.go diff --git a/test/e2e/framework/monitoring_stack.go b/test/e2e/framework/monitoring_stack.go new file mode 100644 index 00000000..62235d6c --- /dev/null +++ b/test/e2e/framework/monitoring_stack.go @@ -0,0 +1,47 @@ +package framework + +import ( + "context" + "testing" + + stack "github.com/rhobs/observability-operator/pkg/apis/monitoring/v1alpha1" + "gotest.tools/v3/assert" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" +) + +type MonitoringStackConfig func(monitoringStack *stack.MonitoringStack) + +func SetPrometheusReplicas(replicas int32) MonitoringStackConfig { + return func(ms *stack.MonitoringStack) { + ms.Spec.PrometheusConfig.Replicas = &replicas + } +} + +func SetResourceSelector(resourceSelector *v1.LabelSelector) MonitoringStackConfig { + return func(ms *stack.MonitoringStack) { + ms.Spec.ResourceSelector = resourceSelector + } +} + +func SetAlertmanagerDisabled(disabled bool) MonitoringStackConfig { + return func(ms *stack.MonitoringStack) { + ms.Spec.AlertmanagerConfig.Disabled = disabled + } +} + +// UpdateWithRetry updates monitoringstack with retry +func (f *Framework) UpdateWithRetry(t *testing.T, ms *stack.MonitoringStack, fns 
...MonitoringStackConfig) error { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + key := types.NamespacedName{Name: ms.Name, Namespace: ms.Namespace} + err := f.K8sClient.Get(context.Background(), key, ms) + assert.NilError(t, err, "failed to get a monitoring stack") + for _, fn := range fns { + fn(ms) + } + err = f.K8sClient.Update(context.Background(), ms) + return err + }) + return err +} diff --git a/test/e2e/monitoring_stack_controller_test.go b/test/e2e/monitoring_stack_controller_test.go index b2890be9..0418143a 100644 --- a/test/e2e/monitoring_stack_controller_test.go +++ b/test/e2e/monitoring_stack_controller_test.go @@ -144,8 +144,7 @@ func nilResrouceSelectorPropagatesToPrometheus(t *testing.T) { updatedMS := &stack.MonitoringStack{} f.GetResourceWithRetry(t, ms.Name, ms.Namespace, updatedMS) - updatedMS.Spec.ResourceSelector = nil - err = f.K8sClient.Update(context.Background(), updatedMS) + err = f.UpdateWithRetry(t, updatedMS, framework.SetResourceSelector(nil)) assert.NilError(t, err, "failed to patch monitoring stack with nil resource selector") prometheus := monv1.Prometheus{} @@ -392,13 +391,8 @@ func singlePrometheusReplicaHasNoPDB(t *testing.T) { pdbName := ms.Name + "-prometheus" f.AssertResourceEventuallyExists(pdbName, ms.Namespace, &pdb)(t) - // Update replica count to 1 and assert that pdb is removed - key := types.NamespacedName{Name: ms.Name, Namespace: ms.Namespace} - err = f.K8sClient.Get(context.Background(), key, ms) - assert.NilError(t, err, "failed to get a monitoring stack") - - ms.Spec.PrometheusConfig.Replicas = intPtr(1) - err = f.K8sClient.Update(context.Background(), ms) + // Update replica count to 1 + err = f.UpdateWithRetry(t, ms, framework.SetPrometheusReplicas(1)) assert.NilError(t, err, "failed to update monitoring stack") // ensure there is no pdb @@ -476,10 +470,8 @@ func assertAlertmanagerDeployedAndRemoved(t *testing.T) { key := types.NamespacedName{Name: ms.Name, Namespace: ms.Namespace} err := 
f.K8sClient.Get(context.Background(), key, &am) assert.NilError(t, err) - - updatedMS.Spec.AlertmanagerConfig.Disabled = true - err = f.K8sClient.Update(context.Background(), &updatedMS) - assert.NilError(t, err) + err = f.UpdateWithRetry(t, &updatedMS, framework.SetAlertmanagerDisabled(true)) + assert.NilError(t, err, "failed to update monitoring stack to disable alertmanager") f.AssertAlertmanagerAbsent(t, updatedMS.Name, updatedMS.Namespace) } @@ -576,13 +568,8 @@ func prometheusScaleDown(t *testing.T) { assert.Equal(t, prom.Status.Replicas, int32(1)) + err = f.UpdateWithRetry(t, ms, framework.SetPrometheusReplicas(0)) key := types.NamespacedName{Name: ms.Name, Namespace: ms.Namespace} - err = f.K8sClient.Get(context.Background(), key, ms) - assert.NilError(t, err, "failed to get a monitoring stack") - - numOfRep = 0 - ms.Spec.PrometheusConfig.Replicas = &numOfRep - err = f.K8sClient.Update(context.Background(), ms) assert.NilError(t, err, "failed to update a monitoring stack") err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, framework.DefaultTestTimeout, true, func(ctx context.Context) (bool, error) { if err := f.K8sClient.Get(context.Background(), key, &prom); errors.IsNotFound(err) {