
Modification of test cases
Signed-off-by: Feny Mehta <[email protected]>
fbm3307 committed Nov 12, 2024
1 parent bf63303 commit 09411ad
Showing 3 changed files with 218 additions and 48 deletions.
71 changes: 39 additions & 32 deletions pkg/cmd/adm/restart.go
@@ -18,6 +18,10 @@ import (
     runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+type NonOperatorDeploymentsRestarterFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error
+
+type RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error
+
 // NewRestartCmd() is a function to restart the whole operator, it relies on the target cluster and fetches the cluster config
 // 1. If the command is run for host operator, it restart the whole host operator.(it deletes olm based pods(host-operator pods),
 // waits for the new pods to come up, then uses rollout-restart command for non-olm based - registration-service)
@@ -80,11 +84,11 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
         return nil
     }
 
-    return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams)
+    return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments)
 }
 
 // This function has the whole logic of getting the list of operator and non-operator based deployment, then proceed on restarting/deleting accordingly
-func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
+func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, checker RolloutStatusCheckerFunc, restarter NonOperatorDeploymentsRestarterFunc) error {
 
     ctx.Printlnf("Fetching the current Operator and non-Operator deployments of the operator in %s namespace", ns)
     operatorDeploymentList, nonOperatorDeploymentList, err := getExistingDeployments(ctx, cl, ns)
@@ -93,43 +97,45 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client,
     }
     //if there is no operator deployment, no need for restart
     if len(operatorDeploymentList.Items) == 0 {
-        return fmt.Errorf("no operator based deployment restart happened as operator deployment found in namespace %s is 0", ns)
-    } else {
-        for _, operatorDeployment := range operatorDeploymentList.Items {
-            ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment.Name)
+        return fmt.Errorf("no operator based deployment found in namespace %s , hence no restart happened", ns)
+    }
+    //Deleting the pods of the operator based deployment and then checking the status
+    for _, operatorDeployment := range operatorDeploymentList.Items {
+        ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment.Name)
 
-            if err := deleteAndWaitForPods(ctx, cl, operatorDeployment); err != nil {
-                return err
-            }
+        if err := deleteAndWaitForPods(ctx, cl, operatorDeployment); err != nil {
+            return err
+        }

[Codecov warning: added lines 107-108 in pkg/cmd/adm/restart.go not covered by tests]

-            ctx.Printlnf("Checking the status of the deleted pod's deployment %v", operatorDeployment.Name)
-            //check the rollout status
-            if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil {
-                return err
-            }
-        }
-    }
+        ctx.Printlnf("Checking the status of the deleted pod's deployment %v", operatorDeployment.Name)
+        //check the rollout status
+        if err := checker(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil {
+            return err
+        }
+    }
 
-    if len(nonOperatorDeploymentList.Items) != 0 {
-        for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items {
-            if nonOperatorDeployment.Name != "autoscaling-buffer" {
-                ctx.Printlnf("Proceeding to restart the non-operator deployment %v", nonOperatorDeployment.Name)
+    if len(nonOperatorDeploymentList.Items) == 0 {
+        // if there are no non-operator deployments
+        ctx.Printlnf("No Non-operator deployment found in namespace %s, hence no restart happened", ns)
+        return nil
+    }
+    for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items {
+        if nonOperatorDeployment.Name != "autoscaling-buffer" {
+            ctx.Printlnf("Proceeding to restart the non-operator deployment %v", nonOperatorDeployment.Name)
 
-                if err := restartNonOlmDeployments(ctx, nonOperatorDeployment, f, ioStreams); err != nil {
-                    return err
-                }
-                //check the rollout status
-                ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment.Name)
-                if err := checkRolloutStatus(ctx, f, ioStreams, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil {
-                    return err
-                }
-            }
-            ctx.Printlnf("No Non-operator deployment restart happened as Non-Operator deployment is autoscaling-buffer found in namespace %s", ns)
-        }
-    } else {
-        //if there are no non-operator deployments
-        ctx.Printlnf("No Non-operator deployment restart happened as Non-Operator deployment found in namespace %s is 0", ns)
-    }
+            if err := restarter(ctx, nonOperatorDeployment, f, ioStreams); err != nil {
+                return err
+            }
+
+            ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment.Name)
+            //check the rollout status
+            if err := checker(ctx, f, ioStreams, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil {
+                return err
+            }

[Codecov warning: added lines 132-133 in pkg/cmd/adm/restart.go not covered by tests]

+            return nil
+        }
+        ctx.Printlnf("Found only autoscaling-buffer deployment in namespace %s , which is not required to be restarted", ns)
+    }
 
     return nil
 }

@@ -155,7 +161,7 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien
 
 }
 
-func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
+func restartNonOperatorDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
 
     o := kubectlrollout.NewRolloutRestartOptions(ioStreams)
 
@@ -173,6 +179,7 @@ func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.
 }
 
 func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
+
     cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams)
 
     if err := cmd.Complete(f, []string{"deployment"}); err != nil {
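The two function types introduced in restart.go act as injection seams: the production call site in `restart()` passes the real `checkRolloutStatus` and `restartNonOperatorDeployments`, while the tests below substitute fakes so no kubectl machinery has to run. A minimal, self-contained sketch of the same pattern (every identifier in it is a hypothetical illustration, not code from this commit):

```go
package main

import "fmt"

// Checker plays the role of RolloutStatusCheckerFunc: a behaviour the caller
// injects instead of the function hard-wiring a kubectl-backed call.
type Checker func(labelSelector string) error

// restartAll is a simplified stand-in for restartDeployment: because the
// checker arrives as a parameter, a test can stub it or record its calls.
func restartAll(deployments []string, check Checker) error {
	for _, d := range deployments {
		if err := check("app=" + d); err != nil {
			return fmt.Errorf("rollout check failed for %s: %w", d, err)
		}
	}
	return nil
}

func main() {
	// A test passes a fake that records the selectors it receives; production
	// wiring passes the real implementation instead.
	var seen []string
	fake := func(labelSelector string) error {
		seen = append(seen, labelSelector)
		return nil
	}
	if err := restartAll([]string{"registration-service"}, fake); err != nil {
		fmt.Println("unexpected error:", err)
	}
	fmt.Println(seen) // prints: [app=registration-service]
}
```

The trade-off mirrors the change to `restartDeployment`: the signature grows by two parameters, but the unit tests no longer need a live factory or kubectl-backed IO streams.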
191 changes: 177 additions & 14 deletions pkg/cmd/adm/restart_test.go
@@ -2,6 +2,7 @@ package adm
 
 import (
     "bytes"
+    "fmt"
     "io"
     "net/http"
     "testing"
@@ -24,6 +25,7 @@ import (
     "k8s.io/client-go/rest/fake"
     cgtesting "k8s.io/client-go/testing"
     cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
+    cmdutil "k8s.io/kubectl/pkg/cmd/util"
     "k8s.io/kubectl/pkg/scheme"
 )
 
@@ -157,19 +159,12 @@ func TestRestartDeployment(t *testing.T) {
             ctx := clicontext.NewCommandContext(term, newClient)
 
             //when
-            err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams)
+            err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams, checkRolloutStatus, restartNonOperatorDeployments)
 
             //then
-            actualPod := &corev1.Pod{}
-
             //checking the whole flow(starting with operator deployments & then to non operator deployments)
             if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" && tc.name1 != "autoscaling-buffer" {
                 //checking the flow for operator deployments
-                require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in")
-                require.Contains(t, term.Output(), "Proceeding to delete the Pods of")
-                require.Contains(t, term.Output(), "Listing the pods to be deleted")
-                require.Contains(t, term.Output(), "Starting to delete the pods")
-                err = fakeClient.Get(ctx, namespacedName, actualPod)
-                //pods are actually deleted
-                require.True(t, apierror.IsNotFound(err))
                 require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment")
                 //checking the output from kubectl for rolloutstatus
                 require.Contains(t, buf.String(), tc.expectedOutput)
@@ -187,28 +182,196 @@ func TestRestartDeployment(t *testing.T) {
                 require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment")
             } else if tc.labelValue == "codeready-toolchain" {
                 //Checking the logic where no operator deployments are there
-                require.Error(t, err, "no operator based deployment restart happened as operator deployment found in namespace")
+                require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened")
                 assert.Equal(t, 0, csCalls)
             } else if tc.labelValue == "kubesaw-controller-manager" && tc.name1 != "autoscaling-buffer" {
                 //checking the logic when only operator based deployment is there and no non-operator based
-                require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment found in namespace")
+                require.Contains(t, term.Output(), "No Non-operator deployment found in namespace", tc.namespace, ", hence no restart happened")
                 assert.Equal(t, 1, csCalls)
             } else if tc.name1 == "autoscaling-buffer" {
-                require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment is autoscaling-buffer found in namespace")
+                require.Contains(t, term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted")
                 assert.Equal(t, 1, csCalls)
             }
 
         })
     }
 }

func TestOperator(t *testing.T) {
//given
testIOStreams := genericclioptions.NewTestIOStreamsDiscard()
SetFileConfig(t, Host())
toolchainCluster := NewToolchainCluster(ToolchainClusterName("host"))
hostDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1)
hostDeployment.Labels = make(map[string]string)
hostDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager"
regServDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "registration-service"), 1)
regServDeployment.Labels = make(map[string]string)
regServDeployment.Labels["toolchain.dev.openshift.com/provider"] = "codeready-toolchain"
hostPod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"))
memberDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "member-operator-controller-manager"), 1)
memberDeployment.Labels = make(map[string]string)
memberDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager"
autoscalarDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "autoscaling-buffer"), 1)
autoscalarDeployment.Labels = make(map[string]string)
autoscalarDeployment.Labels["toolchain.dev.openshift.com/provider"] = "codeready-toolchain"
actualPod := &corev1.Pod{}
term := NewFakeTerminalWithResponse("Y")

t.Run("restart deployment returns an error if no operator based deployment found", func(t *testing.T) {
//given
newClient, fakeClient := NewFakeClients(t, toolchainCluster, regServDeployment)
ctx := clicontext.NewCommandContext(term, newClient)

//when
err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams,
func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)

[GolangCI Lint failure at pkg/cmd/adm/restart_test.go line 230: nil-compare: use require.Nil (testifylint)]
require.Equal(t, "toolchain.dev.openshift.com/provider=codeready-toolchain", labelSelector)
return nil
}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
require.Equal(t, regServDeployment, deployment)
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)

[GolangCI Lint failure at pkg/cmd/adm/restart_test.go line 236: nil-compare: use require.Nil (testifylint)]
return nil
})

//then
require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened")
})
t.Run("restart deployment works successfully with whole operator(operator, non operator)", func(t *testing.T) {
//given
newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod, regServDeployment)
ctx := clicontext.NewCommandContext(term, newClient)

//when
err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams,
func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)

[GolangCI Lint failure at pkg/cmd/adm/restart_test.go line 252: nil-compare: use require.Nil (testifylint)]
return nil
}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)
return nil
})

//then
require.NoError(t, err)
//checking the flow for operator deployments
require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in")
require.Contains(t, term.Output(), "Proceeding to delete the Pods of")
require.Contains(t, term.Output(), "Listing the pods to be deleted")
require.Contains(t, term.Output(), "Starting to delete the pods")
err = fakeClient.Get(ctx, test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), actualPod)
//pods are actually deleted
require.True(t, apierror.IsNotFound(err))
require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment")
//checking the flow for non-operator deployments
require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment")
require.Contains(t, term.Output(), "Checking the status of the rolled out deployment")
})

t.Run("restart deployment works successfully when only operator based deployment", func(t *testing.T) {
//given
newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod)
ctx := clicontext.NewCommandContext(term, newClient)

//when
err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams,
func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)
return nil
}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)
return nil
})

//then
require.NoError(t, err)
require.Contains(t, term.Output(), "No Non-operator deployment found in namespace toolchain-host-operator, hence no restart happened")
})
t.Run("rollout restart returns an error", func(t *testing.T) {
//given
newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, regServDeployment, hostPod)
ctx := clicontext.NewCommandContext(term, newClient)
expectedErr := fmt.Errorf("Could not do rollout restart of the deployment")
//when
err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams,
func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)
return nil
}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
require.Equal(t, testIOStreams, ioStreams)
require.Equal(t, nil, f)
return expectedErr
})

//then
require.EqualError(t, err, expectedErr.Error())
})

t.Run("rollout status for the deleted pods(operator) works", func(t *testing.T) {
//given
newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment)
ctx := clicontext.NewCommandContext(term, newClient)

//when
err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams,
func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
require.Equal(t, testIOStreams, ioStreams)
return nil
}, nil)

//then
require.NoError(t, err)
})

t.Run("error in rollout status of the deleted pods(operator)", func(t *testing.T) {
//given
newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment)
ctx := clicontext.NewCommandContext(term, newClient)
expectedErr := fmt.Errorf("Could not check the status of the deployment")
//when
err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, genericclioptions.NewTestIOStreamsDiscard(),
func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
return expectedErr
}, nil)

//then
require.EqualError(t, err, expectedErr.Error())
})

t.Run("autoscalling deployment should not restart", func(t *testing.T) {
//given
newClient, fakeClient := NewFakeClients(t, toolchainCluster, memberDeployment, autoscalarDeployment)
ctx := clicontext.NewCommandContext(term, newClient)
//when
err := restartDeployment(ctx, fakeClient, "toolchain-member-operator", nil, genericclioptions.NewTestIOStreamsDiscard(),
func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error {
return nil
}, nil)

//then
require.NoError(t, err)
require.Contains(t, term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted")
})

}

func TestRestart(t *testing.T) {

t.Run("restart should start with y response", func(t *testing.T) {
//given
SetFileConfig(t, Host())
toolchainCluster := NewToolchainCluster(ToolchainClusterName("host"))
deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1)
deployment.Labels = make(map[string]string)
deployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager"
term := NewFakeTerminalWithResponse("Y")
newClient, _ := NewFakeClients(t, toolchainCluster, deployment)
ctx := clicontext.NewCommandContext(term, newClient)
@@ -217,7 +380,7 @@ func TestRestart(t *testing.T) {
         err := restart(ctx, "host")
 
         //then
-        require.Error(t, err) //we expect an error as we have not setp up any http client , just checking that it passes the cmd phase and restart method is called
+        require.ErrorContains(t, err, "no such host") //we expect an error as we have not set up any http client , just checking that it passes the cmd phase and restartdeployment method is called
         require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in")
     })
 
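The testifylint failures flagged above all point at `require.Equal(t, nil, f)`. A short sketch of why the linter prefers `require.Nil` (the variable below is a hypothetical stand-in, not code from this commit):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestNilAssertionStyle(t *testing.T) {
	var f error // stands in for the injected cmdutil.Factory, which is nil in these tests

	// Flagged by testifylint (nil-compare): Equal against an untyped nil only
	// passes while the interface value is exactly nil, and it reads as a value
	// comparison rather than a nil check.
	require.Equal(t, nil, f)

	// Preferred form; it also handles typed-nil pointers stored in an interface.
	require.Nil(t, f)
}
```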
4 changes: 2 additions & 2 deletions pkg/cmd/adm/unregister_member_test.go
@@ -29,9 +29,9 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) {
     err := UnregisterMemberCluster(ctx, "member1")
 
     // then
-    require.Error(t, err) // since we have not set up http client required for restart(),it will throw an error.
+    require.ErrorContains(t, err, "no such host") // since we have not set up http client required for restart(),it will throw an error.
     // also the restart functionality is being test in restart_test.go, not deuplicating the test,
-    //just a assertion to make sure that restart is started
+    //just a assertion to make sure that restart is called and started
     AssertToolchainClusterDoesNotExist(t, fakeClient, toolchainCluster)
     assert.Contains(t, term.Output(), "!!! DANGER ZONE !!!")
     assert.NotContains(t, term.Output(), "THIS COMMAND WILL CAUSE UNREGISTER MEMBER CLUSTER FORM HOST CLUSTER. MAKE SURE THERE IS NO USERS LEFT IN THE MEMBER CLUSTER BEFORE UNREGISTERING IT")
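Both test files also swap bare `require.Error` for `require.ErrorContains`. The distinction matters: in testify, trailing arguments to `require.Error` form a failure message only, so they assert nothing about the error's content, while `ErrorContains` actually checks the text. A small sketch (the error string is invented for the example):

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorAssertionStrictness(t *testing.T) {
	err := errors.New("dial tcp: lookup host-cluster: no such host")

	// Passes for any non-nil error; the second argument is merely the failure
	// message and asserts nothing about err itself.
	require.Error(t, err, "expected restart to fail without an http client")

	// Fails unless the error text actually contains the substring, which is
	// the behaviour the updated tests rely on.
	require.ErrorContains(t, err, "no such host")
}
```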
