Skip to content

Commit

Permalink
New drainer
Browse files Browse the repository at this point in the history
Signed-off-by: Artiom Diomin <[email protected]>
  • Loading branch information
kron4eg committed Jul 16, 2021
1 parent 09c6b57 commit 7d84a6b
Show file tree
Hide file tree
Showing 11 changed files with 168 additions and 87 deletions.
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ require (
k8s.io/component-base v0.19.4
k8s.io/kube-aggregator v0.19.4
k8s.io/kube-proxy v0.19.4
k8s.io/kubectl v0.19.4
k8s.io/kubelet v0.19.4
sigs.k8s.io/controller-runtime v0.7.2
sigs.k8s.io/yaml v1.2.0
Expand Down
31 changes: 31 additions & 0 deletions go.sum

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions hack/tools.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ package tools

import (
_ "golang.org/x/tools/cmd/stringer"

_ "k8s.io/code-generator"
_ "k8s.io/code-generator/cmd/conversion-gen"
_ "k8s.io/code-generator/cmd/deepcopy-gen"
Expand Down
100 changes: 100 additions & 0 deletions pkg/nodeutils/drain.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
/*
Copyright 2019 The KubeOne Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodeutils

import (
"context"

"github.com/sirupsen/logrus"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/drain"
)

// Drainer knows how to drain and (un)cordon a Kubernetes node, mirroring
// the behavior of `kubectl drain` and `kubectl cordon`/`kubectl uncordon`.
type Drainer interface {
	// Drain evicts (or deletes) all drainable pods from the named node.
	Drain(ctx context.Context, nodeName string) error
	// Cordon marks the named node unschedulable when state is true, or
	// schedulable again when state is false.
	Cordon(ctx context.Context, nodeName string, state bool) error
}

// NewDrainer returns a Drainer that talks to the cluster described by
// restconfig and reports drain progress through logger.
func NewDrainer(restconfig *rest.Config, logger logrus.FieldLogger) Drainer {
	dr := drainer{
		restconfig: restconfig,
		logger:     logger,
	}

	return &dr
}

// drainer is the default Drainer implementation, backed by the kubectl
// drain helper package.
type drainer struct {
	logger     logrus.FieldLogger // destination for drain/cordon progress output
	restconfig *rest.Config       // REST config used to build the typed clientset
}

// Drain evicts (or deletes) every drainable pod from the named node,
// equivalent to running `kubectl drain` against it.
func (dr *drainer) Drain(ctx context.Context, nodeName string) error {
	helper, err := dr.drainHelper(ctx)
	if err != nil {
		return err
	}

	return drain.RunNodeDrain(helper, nodeName)
}

// Cordon marks the named node unschedulable (desired == true) or
// schedulable again (desired == false), equivalent to `kubectl cordon`
// and `kubectl uncordon` respectively.
func (dr *drainer) Cordon(ctx context.Context, nodeName string, desired bool) error {
	helper, err := dr.drainHelper(ctx)
	if err != nil {
		return err
	}

	// The cordon helper mutates a concrete Node object, so fetch the
	// current one first.
	node, err := helper.Client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	return drain.RunCordonOrUncordon(helper, node, desired)
}

// drainHelper builds a kubectl drain.Helper wired to this drainer's
// cluster credentials and logger.
//
// The settings mirror `kubectl drain --ignore-daemonsets --delete-local-data`:
//   - GracePeriodSeconds of -1 respects each pod's own grace period,
//   - DaemonSet-managed pods are skipped,
//   - pods using emptyDir volumes are deleted anyway.
func (dr *drainer) drainHelper(ctx context.Context) (*drain.Helper, error) {
	// Fix: local was previously misspelled "kubeClinet".
	kubeClient, err := kubernetes.NewForConfig(dr.restconfig)
	if err != nil {
		return nil, err
	}

	return &drain.Helper{
		Ctx:                 ctx,
		Client:              kubeClient,
		GracePeriodSeconds:  -1,
		IgnoreAllDaemonSets: true,
		DeleteLocalData:     true,
		// Route the helper's stdout/stderr-style output through the
		// structured logger instead of raw streams.
		Out:    loggerIoWriter(dr.logger.Infof),
		ErrOut: loggerIoWriter(dr.logger.Errorf),
		OnPodDeletedOrEvicted: func(pod *corev1.Pod, usingEviction bool) {
			evicted := "evicted"
			if !usingEviction {
				evicted = "deleted"
			}
			dr.logger.Infof("pod %q/%q is %s", pod.GetNamespace(), pod.GetName(), evicted)
		},
	}, nil
}

// loggerIoWriter adapts a printf-style logging function (such as logrus
// Infof/Errorf) to the io.Writer interface, so it can serve as the
// Out/ErrOut streams of a drain.Helper.
type loggerIoWriter func(format string, args ...interface{})

// Write forwards the raw bytes to the wrapped logging function and
// always reports the full slice as successfully written.
func (lw loggerIoWriter) Write(p []byte) (int, error) {
	lw("%s", p)

	return len(p), nil
}
11 changes: 0 additions & 11 deletions pkg/scripts/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,6 @@ var (
echo "$fqdn"
`)

drainNodeScriptTemplate = heredoc.Doc(`
sudo KUBECONFIG=/etc/kubernetes/admin.conf \
kubectl drain {{ .NODE_NAME }} --ignore-daemonsets --delete-local-data
`)

restartKubeAPIServerCrictlTemplate = heredoc.Doc(`
apiserver_id=$(sudo crictl ps --name=kube-apiserver -q)
[ -z "$apiserver_id" ] && exit 1
Expand All @@ -47,12 +42,6 @@ var (
`)
)

func DrainNode(nodeName string) (string, error) {
return Render(drainNodeScriptTemplate, Data{
"NODE_NAME": nodeName,
})
}

func Hostname() string {
return hostnameScript
}
Expand Down
33 changes: 0 additions & 33 deletions pkg/scripts/node_test.go

This file was deleted.

4 changes: 0 additions & 4 deletions pkg/scripts/testdata/TestDrainNode.golden

This file was deleted.

28 changes: 0 additions & 28 deletions pkg/tasks/nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,34 +37,6 @@ import (
"sigs.k8s.io/yaml"
)

func drainNode(s *state.State, node kubeoneapi.HostConfig) error {
cmd, err := scripts.DrainNode(node.Hostname)
if err != nil {
return err
}

return s.RunTaskOnLeader(func(s *state.State, _ *kubeoneapi.HostConfig, _ ssh.Connection) error {
_, _, err := s.Runner.RunRaw(cmd)

return err
})
}

func uncordonNode(s *state.State, host kubeoneapi.HostConfig) error {
updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
var node corev1.Node

if err := s.DynamicClient.Get(s.Context, types.NamespacedName{Name: host.Hostname}, &node); err != nil {
return err
}

node.Spec.Unschedulable = false
return s.DynamicClient.Update(s.Context, &node)
})

return errors.WithStack(updateErr)
}

func restartKubeAPIServer(s *state.State) error {
s.Logger.Infoln("Restarting unhealthy API servers if needed...")

Expand Down
14 changes: 11 additions & 3 deletions pkg/tasks/upgrade_follower.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"github.com/pkg/errors"

kubeoneapi "k8c.io/kubeone/pkg/apis/kubeone"
"k8c.io/kubeone/pkg/nodeutils"
"k8c.io/kubeone/pkg/ssh"
"k8c.io/kubeone/pkg/state"
)
Expand All @@ -38,8 +39,15 @@ func upgradeFollowerExecutor(s *state.State, node *kubeoneapi.HostConfig, conn s
return errors.Wrap(err, "failed to label follower control plane node")
}

logger.Infoln("Draining follower control plane...")
if err := drainNode(s, *node); err != nil {
drainer := nodeutils.NewDrainer(s.RESTConfig, logger)

logger.Infoln("Cordon the follower control plane node...")
if err := drainer.Cordon(s.Context, node.Hostname, true); err != nil {
return errors.Wrap(err, "failed to cordon follower control plane node")
}

logger.Infoln("Drain the follower control plane node...")
if err := drainer.Drain(s.Context, node.Hostname); err != nil {
return errors.Wrap(err, "failed to drain follower control plane node")
}

Expand All @@ -59,7 +67,7 @@ func upgradeFollowerExecutor(s *state.State, node *kubeoneapi.HostConfig, conn s
}

logger.Infoln("Uncordoning follower control plane...")
if err := uncordonNode(s, *node); err != nil {
if err := drainer.Cordon(s.Context, node.Hostname, false); err != nil {
return errors.Wrap(err, "failed to uncordon follower control plane node")
}

Expand Down
16 changes: 12 additions & 4 deletions pkg/tasks/upgrade_leader.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"github.com/pkg/errors"

kubeoneapi "k8c.io/kubeone/pkg/apis/kubeone"
"k8c.io/kubeone/pkg/nodeutils"
"k8c.io/kubeone/pkg/ssh"
"k8c.io/kubeone/pkg/state"
)
Expand All @@ -38,9 +39,16 @@ func upgradeLeaderExecutor(s *state.State, node *kubeoneapi.HostConfig, conn ssh
return errors.Wrap(err, "failed to label leader control plane node")
}

drainer := nodeutils.NewDrainer(s.RESTConfig, logger)

logger.Infoln("Cordoning leader control plane...")
if err := drainer.Cordon(s.Context, node.Hostname, true); err != nil {
return errors.Wrap(err, "failed to cordon follower control plane node")
}

logger.Infoln("Draining leader control plane...")
if err := drainNode(s, *node); err != nil {
return errors.Wrap(err, "failed to drain leader control plane node")
if err := drainer.Drain(s.Context, node.Hostname); err != nil {
return errors.Wrap(err, "failed to drain follower control plane node")
}

logger.Infoln("Upgrading kubeadm binary on the leader control plane...")
Expand All @@ -59,8 +67,8 @@ func upgradeLeaderExecutor(s *state.State, node *kubeoneapi.HostConfig, conn ssh
}

logger.Infoln("Uncordoning leader control plane...")
if err := uncordonNode(s, *node); err != nil {
return errors.Wrap(err, "failed to uncordon leader control plane node")
if err := drainer.Cordon(s.Context, node.Hostname, false); err != nil {
return errors.Wrap(err, "failed to uncordon follower control plane node")
}

logger.Infof("Waiting %v to ensure all components are up...", timeoutNodeUpgrade)
Expand Down
16 changes: 12 additions & 4 deletions pkg/tasks/upgrade_static_workers.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"github.com/pkg/errors"

kubeoneapi "k8c.io/kubeone/pkg/apis/kubeone"
"k8c.io/kubeone/pkg/nodeutils"
"k8c.io/kubeone/pkg/ssh"
"k8c.io/kubeone/pkg/state"
)
Expand All @@ -40,9 +41,16 @@ func upgradeStaticWorkersExecutor(s *state.State, node *kubeoneapi.HostConfig, c
return errors.Wrap(err, "failed to label static worker node")
}

drainer := nodeutils.NewDrainer(s.RESTConfig, logger)

logger.Infoln("Cordoning static worker node...")
if err := drainer.Cordon(s.Context, node.Hostname, true); err != nil {
return errors.Wrap(err, "failed to cordon follower control plane node")
}

logger.Infoln("Draining static worker node...")
if err := drainNode(s, *node); err != nil {
return errors.Wrap(err, "failed to drain static worker node")
if err := drainer.Drain(s.Context, node.Hostname); err != nil {
return errors.Wrap(err, "failed to drain follower control plane node")
}

logger.Infoln("Upgrading Kubernetes binaries on static worker node...")
Expand All @@ -61,8 +69,8 @@ func upgradeStaticWorkersExecutor(s *state.State, node *kubeoneapi.HostConfig, c
}

logger.Infoln("Uncordoning static worker node...")
if err := uncordonNode(s, *node); err != nil {
return errors.Wrap(err, "failed to uncordon static worker node")
if err := drainer.Cordon(s.Context, node.Hostname, false); err != nil {
return errors.Wrap(err, "failed to uncordon follower control plane node")
}

logger.Infof("Waiting %v to ensure all components are up...", timeoutNodeUpgrade)
Expand Down

0 comments on commit 7d84a6b

Please sign in to comment.