Commit a616f4b

test: add machine removal test
Remove the machine, wait for cluster to become healthy again.

Signed-off-by: Artem Chernyshev <[email protected]>
Unix4ever committed Sep 20, 2021
1 parent 6ad6aac commit a616f4b
Showing 6 changed files with 153 additions and 236 deletions.
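
The new test file itself is not among the diffs reproduced below, so the following is only an illustrative sketch of the flow the commit message describes: delete one control-plane Machine, then poll until the control plane reports all replicas ready again. The package name, helper signature, and the TalosControlPlane status fields used here are assumptions, not the actual test code.

// machine_removal_sketch_test.go — hypothetical sketch, not the test added by this commit.
package integration

import (
    "context"
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
    "sigs.k8s.io/controller-runtime/pkg/client"

    controlplanev1 "github.com/talos-systems/cluster-api-control-plane-provider-talos/api/v1alpha3"
)

// testMachineRemoval deletes one control-plane Machine and waits until the
// TalosControlPlane reports all replicas ready again.
func testMachineRemoval(ctx context.Context, t *testing.T, c client.Client, machine *clusterv1.Machine, tcpKey client.ObjectKey) {
    if err := c.Delete(ctx, machine); err != nil {
        t.Fatalf("failed to delete machine %q: %v", machine.Name, err)
    }

    // Poll until the control plane converges: all desired replicas are ready.
    // (The exact status field names are assumptions.)
    err := wait.PollImmediate(10*time.Second, 15*time.Minute, func() (bool, error) {
        var tcp controlplanev1.TalosControlPlane

        if err := c.Get(ctx, tcpKey, &tcp); err != nil {
            // Transient API errors are expected while the cluster recovers.
            return false, nil
        }

        return tcp.Status.Ready && tcp.Status.ReadyReplicas == tcp.Status.Replicas, nil
    })
    if err != nil {
        t.Fatalf("cluster did not become healthy after machine removal: %v", err)
    }
}
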
2 changes: 1 addition & 1 deletion controllers/configs.go
@@ -65,7 +65,7 @@ func (r *TalosControlPlaneReconciler) talosconfigForMachines(ctx context.Context
         }

         // grab all addresses as endpoints
-        node, err := clientset.CoreV1().Nodes().Get(ctx, machine.Status.NodeRef.Name, metav1.GetOptions{})
+        node, err := clientset.CoreV1().Nodes().Get(machine.Status.NodeRef.Name, metav1.GetOptions{})
         if err != nil {
             return nil, err
         }
10 changes: 5 additions & 5 deletions controllers/taloscontrolplane_controller.go
@@ -357,7 +357,7 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,
     if !machine.ObjectMeta.DeletionTimestamp.IsZero() {
         r.Log.Info("Machine is in process of deletion", "machine", machine.Name)

-        node, err := clientset.CoreV1().Nodes().Get(ctx, machine.Status.NodeRef.Name, metav1.GetOptions{})
+        node, err := clientset.CoreV1().Nodes().Get(machine.Status.NodeRef.Name, metav1.GetOptions{})
         if err != nil {
             // It's possible for the node to already be deleted in the workload cluster, so we just
             // requeue if that's that case instead of throwing a scary error.
@@ -369,7 +369,7 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,

         r.Log.Info("Deleting node", "machine", machine.Name, "node", node.Name)

-        err = clientset.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
+        err = clientset.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{})
         if err != nil {
             return ctrl.Result{RequeueAfter: 20 * time.Second}, err
         }
@@ -442,7 +442,7 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(ctx context.Context,

     r.Log.Info("Deleting node", "machine", deleteMachine.Name, "node", node.Name)

-    err = clientset.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
+    err = clientset.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{})
     if err != nil {
         return ctrl.Result{RequeueAfter: 20 * time.Second}, err
     }
@@ -639,7 +639,7 @@ func (r *TalosControlPlaneReconciler) updateStatus(ctx context.Context, tcp *con
             return fmt.Errorf("machine %q does not have a noderef", ownedMachine.Name)
         }

-        node, err := clientset.CoreV1().Nodes().Get(ctx, ownedMachine.Status.NodeRef.Name, metav1.GetOptions{})
+        node, err := clientset.CoreV1().Nodes().Get(ownedMachine.Status.NodeRef.Name, metav1.GetOptions{})
         if err != nil {
             return fmt.Errorf("failed to get node %q: %w", node.Name, err)
         }
@@ -680,7 +680,7 @@ func (r *TalosControlPlaneReconciler) updateStatus(ctx context.Context, tcp *con
     // We consider ourselves "initialized" if the workload cluster returns any number of nodes.
     // We also do not return client list errors (just log them) as it's expected that it will fail
    // for a while until the cluster is up.
-    nodeList, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+    nodeList, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
     if err == nil {
         if len(nodeList.Items) > 0 {
             tcp.Status.Initialized = true
28 changes: 10 additions & 18 deletions go.mod
@@ -2,34 +2,26 @@ module github.com/talos-systems/cluster-api-control-plane-provider-talos

 go 1.16

-replace (
-    // keep older versions of k8s.io packages to keep compatiblity with cluster-api
-    k8s.io/api v0.21.3 => k8s.io/api v0.20.5
-    k8s.io/api-server v0.21.3 => k8s.io/api-server v0.20.5
-    k8s.io/apimachinery v0.21.3 => k8s.io/apimachinery v0.20.5
-    k8s.io/client-go v0.21.3 => k8s.io/client-go v0.20.5
-
-    sigs.k8s.io/cluster-api v0.3.20 => sigs.k8s.io/cluster-api v0.3.9
-)
-
 require (
     github.com/coreos/go-semver v0.3.0
     github.com/go-logr/logr v0.4.0
     github.com/go-logr/zapr v0.2.0 // indirect
     github.com/google/uuid v1.1.2
     github.com/onsi/ginkgo v1.16.4
     github.com/onsi/gomega v1.14.0
     github.com/pkg/errors v0.9.1
     github.com/stretchr/testify v1.7.0
-    github.com/talos-systems/capi-utils v0.0.0-20210910152701-028c7d3c0257
+    github.com/talos-systems/capi-utils v0.0.0-20210917140904-9587089e8425
     github.com/talos-systems/cluster-api-bootstrap-provider-talos v0.2.0
     github.com/talos-systems/go-retry v0.3.1
     github.com/talos-systems/talos/pkg/machinery v0.12.0
     google.golang.org/grpc v1.40.0
     gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
-    k8s.io/api v0.21.3
-    k8s.io/apimachinery v0.21.3
-    k8s.io/apiserver v0.21.3
-    k8s.io/client-go v0.21.3
-    k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471
-    sigs.k8s.io/cluster-api v0.3.20
-    sigs.k8s.io/controller-runtime v0.6.3
+    k8s.io/api v0.17.9
+    k8s.io/apimachinery v0.17.9
+    k8s.io/apiserver v0.17.9
+    k8s.io/client-go v0.17.9
+    k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19
+    sigs.k8s.io/cluster-api v0.3.23
+    sigs.k8s.io/controller-runtime v0.5.14
 )
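
The go.mod change is what drives the controller edits above: prior to v0.18, the typed clients in k8s.io/client-go take no context.Context argument and Delete accepts a *metav1.DeleteOptions. Pinning back to v0.17.9 (to line up with cluster-api v0.3.23 and controller-runtime v0.5.14, replacing the earlier replace-directive workaround) therefore means dropping ctx from the Get/List/Delete calls. For reference, the relevant method shapes in client-go v0.17.x look roughly like this (abridged):

// Abridged from k8s.io/client-go/kubernetes/typed/core/v1 at v0.17.x.
type NodeInterface interface {
    Get(name string, options metav1.GetOptions) (*corev1.Node, error)
    List(opts metav1.ListOptions) (*corev1.NodeList, error)
    Delete(name string, options *metav1.DeleteOptions) error
    // ... (context.Context parameters and by-value DeleteOptions arrived in v0.18)
}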
