From 5470e07d886b90a4f7ee71e2f9a31d8141c6f243 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Wed, 8 Feb 2017 15:22:09 -0800
Subject: [PATCH] raft: Use TransferLeadership to make leader demotion safer

When we demote the leader, we currently wait for all queued messages to be
sent, as a best-effort approach to making sure the other nodes find out that
the node removal has been committed and stop treating the current leader as a
cluster member. This doesn't work reliably, because those final sends can
still fail (for example, by timing out).

To make this more robust, use TransferLeadership when the leader is trying to
remove itself. The new leader's reconciliation loop will kick in and remove
the old leader.

Signed-off-by: Aaron Lehmann
---
 manager/controlapi/node_test.go            | 19 ++---
 manager/role_manager.go                    | 14 +++-
 manager/state/raft/raft.go                 | 90 ++++++++++++++++-------
 manager/state/raft/transport/transport.go  | 13 ++++
 4 files changed, 95 insertions(+), 41 deletions(-)

diff --git a/manager/controlapi/node_test.go b/manager/controlapi/node_test.go
index 71c11ca902..bde8526bf7 100644
--- a/manager/controlapi/node_test.go
+++ b/manager/controlapi/node_test.go
@@ -532,7 +532,7 @@ func TestUpdateNode(t *testing.T) {
 	assert.Error(t, err)
 }
 
-func testUpdateNodeDemote(leader bool, t *testing.T) {
+func testUpdateNodeDemote(t *testing.T) {
 	tc := cautils.NewTestCA(nil)
 	defer tc.Stop()
 	ts := newTestServer(t)
@@ -654,14 +654,8 @@ func testUpdateNodeDemote(leader bool, t *testing.T) {
 		return nil
 	}))
 
-	var demoteNode, lastNode *raftutils.TestNode
-	if leader {
-		demoteNode = nodes[1]
-		lastNode = nodes[2]
-	} else {
-		demoteNode = nodes[2]
-		lastNode = nodes[1]
-	}
+	demoteNode := nodes[2]
+	lastNode := nodes[1]
 
 	raftMember = ts.Server.raft.GetMemberByNodeID(demoteNode.SecurityConfig.ClientTLSCreds.NodeID())
 	assert.NotNil(t, raftMember)
@@ -734,10 +728,5 @@ func testUpdateNodeDemote(leader bool, t *testing.T) {
 
 func TestUpdateNodeDemote(t *testing.T) {
 	t.Parallel()
-	testUpdateNodeDemote(false, t)
-}
-
-func TestUpdateNodeDemoteLeader(t *testing.T) {
-	t.Parallel()
-	testUpdateNodeDemote(true, t)
+	testUpdateNodeDemote(t)
 }
diff --git a/manager/role_manager.go b/manager/role_manager.go
index 63dbfb8505..4fd95c1293 100644
--- a/manager/role_manager.go
+++ b/manager/role_manager.go
@@ -136,11 +136,23 @@ func (rm *roleManager) reconcileRole(node *api.Node) {
 		rmCtx, rmCancel := context.WithTimeout(rm.ctx, 5*time.Second)
 		defer rmCancel()
 
+		if member.RaftID == rm.raft.Config.ID {
+			// Don't use rmCtx, because we expect to lose
+			// leadership, which will cancel this context.
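+			// If the transfer fails, fall through and try to
+			// remove this member from the raft cluster directly.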
+			log.L.Info("demoted; transferring leadership")
+			err := rm.raft.TransferLeadership(context.Background())
+			if err == nil {
+				return
+			}
+			log.L.WithError(err).Info("failed to transfer leadership")
+		}
 		if err := rm.raft.RemoveMember(rmCtx, member.RaftID); err != nil {
 			// TODO(aaronl): Retry later
 			log.L.WithError(err).Debugf("can't demote node %s at this time", node.ID)
-			return
 		}
+		return
 	}
 
 	err := rm.store.Update(func(tx store.Tx) error {
diff --git a/manager/state/raft/raft.go b/manager/state/raft/raft.go
index ba77ae0ef6..65b74bad77 100644
--- a/manager/state/raft/raft.go
+++ b/manager/state/raft/raft.go
@@ -412,7 +412,7 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) {
 	defer conn.Close()
 
 	client := api.NewRaftMembershipClient(conn)
-	joinCtx, joinCancel := context.WithTimeout(ctx, 10*time.Second)
+	joinCtx, joinCancel := context.WithTimeout(ctx, n.reqTimeout())
 	defer joinCancel()
 	resp, err := client.Join(joinCtx, &api.JoinRequest{
 		Addr: n.opts.Addr,
@@ -1030,6 +1030,10 @@ func (n *Node) UpdateNode(id uint64, addr string) {
 // from a member who is willing to leave its raft
 // membership to an active member of the raft
 func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResponse, error) {
+	if req.Node == nil {
+		return nil, grpc.Errorf(codes.InvalidArgument, "no node information provided")
+	}
+
 	nodeInfo, err := ca.RemoteNode(ctx)
 	if err != nil {
 		return nil, err
@@ -1100,18 +1104,61 @@ func (n *Node) removeMember(ctx context.Context, id uint64) error {
 	n.membershipLock.Lock()
 	defer n.membershipLock.Unlock()
 
-	if n.CanRemoveMember(id) {
-		cc := raftpb.ConfChange{
-			ID:      id,
-			Type:    raftpb.ConfChangeRemoveNode,
-			NodeID:  id,
-			Context: []byte(""),
-		}
-		err := n.configure(ctx, cc)
-		return err
+	if !n.CanRemoveMember(id) {
+		return ErrCannotRemoveMember
 	}
 
-	return ErrCannotRemoveMember
+	cc := raftpb.ConfChange{
+		ID:      id,
+		Type:    raftpb.ConfChangeRemoveNode,
+		NodeID:  id,
+		Context: []byte(""),
+	}
+	return n.configure(ctx, cc)
+}
+
+// TransferLeadership attempts to transfer leadership to a different node,
+// and waits for the transfer to happen.
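+// It returns ErrNoRaftMember if this node is not a raft member, and
+// ErrLostLeadership if it is not the current leader. If the transfer does
+// not finish before the request timeout, the context's error is returned.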
+func (n *Node) TransferLeadership(ctx context.Context) error {
+	ctx, cancelTransfer := context.WithTimeout(ctx, n.reqTimeout())
+	defer cancelTransfer()
+
+	n.stopMu.RLock()
+	defer n.stopMu.RUnlock()
+
+	if !n.IsMember() {
+		return ErrNoRaftMember
+	}
+
+	if !n.isLeader() {
+		return ErrLostLeadership
+	}
+
+	transferee, err := n.transport.LongestActive()
+	if err != nil {
+		return errors.Wrap(err, "failed to get longest-active member")
+	}
+	start := time.Now()
+	n.raftNode.TransferLeadership(ctx, n.Config.ID, transferee)
+	ticker := time.NewTicker(n.opts.TickInterval / 10)
+	defer ticker.Stop()
+	var leader uint64
+	for {
+		leader = n.leader()
+		if leader != raft.None && leader != n.Config.ID {
+			break
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-ticker.C:
+		}
+	}
+	log.G(ctx).Infof("raft: transfer leadership %x -> %x finished in %v", n.Config.ID, leader, time.Since(start))
+	return nil
+}
 
 // RemoveMember submits a configuration change to remove a member from the raft cluster
@@ -1726,23 +1773,12 @@ func (n *Node) applyRemoveNode(ctx context.Context, cc raftpb.ConfChange) (err e
 	}
 
 	if cc.NodeID == n.Config.ID {
-		// wait the commit ack to be sent before closing connection
+		// wait for the commit ack to be sent before closing connection
 		n.asyncTasks.Wait()
 
 		n.NodeRemoved()
-		// if there are only 2 nodes in the cluster, and leader is leaving
-		// before closing the connection, leader has to ensure that follower gets
-		// noticed about this raft conf change commit. Otherwise, follower would
-		// assume there are still 2 nodes in the cluster and won't get elected
-		// into the leader by acquiring the majority (2 nodes)
-
-		// while n.asyncTasks.Wait() could be helpful in this case
-		// it's the best-effort strategy, because this send could be fail due to some errors (such as time limit exceeds)
-		// TODO(Runshen Zhu): use leadership transfer to solve this case, after vendoring raft 3.0+
-	} else {
-		if err := n.transport.RemovePeer(cc.NodeID); err != nil {
-			return err
-		}
+	} else if err := n.transport.RemovePeer(cc.NodeID); err != nil {
+		return err
 	}
 
 	return n.cluster.RemoveMember(cc.NodeID)
@@ -1852,3 +1888,7 @@ func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
 	}
 	return sids
 }
+
+func (n *Node) reqTimeout() time.Duration {
+	return 5*time.Second + 2*time.Duration(n.Config.ElectionTick)*n.opts.TickInterval
+}
diff --git a/manager/state/raft/transport/transport.go b/manager/state/raft/transport/transport.go
index ec1e971cf3..9ce8efd51b 100644
--- a/manager/state/raft/transport/transport.go
+++ b/manager/state/raft/transport/transport.go
@@ -295,6 +295,19 @@ func (t *Transport) Active(id uint64) bool {
 	return active
 }
 
+// LongestActive returns the ID of the peer that has been active for the
+// longest amount of time.
+func (t *Transport) LongestActive() (uint64, error) {
+	p, err := t.longestActive()
+	if err != nil {
+		return 0, err
+	}
+
+	return p.id, nil
+}
+
+// longestActive returns the peer that has been active for the longest amount
+// of time.
 func (t *Transport) longestActive() (*peer, error) {
 	var longest *peer
 	var longestTime time.Time