@@ -387,12 +387,7 @@ type Replica struct {
 		// map must only be referenced while Replica.mu is held, except if the
 		// element is removed from the map first. The notable exception is the
 		// contained RaftCommand, which we treat as immutable.
-		localProposals map[storagebase.CmdIDKey]*ProposalData
-		// remoteProposals is maintained by Raft leaders and stores in-flight
-		// commands that were forwarded to the leader during its current term.
-		// The set allows leaders to detect duplicate forwarded commands and
-		// avoid re-proposing the same forwarded command multiple times.
-		remoteProposals map[storagebase.CmdIDKey]struct{}
+		localProposals map[storagebase.CmdIDKey]*ProposalData
 		internalRaftGroup *raft.RawNode
 		// The ID of the replica within the Raft group. May be 0 if the replica has
 		// been created from a preemptive snapshot (i.e. before being added to the
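Note for reviewers: the removed remoteProposals field is nothing more than a set keyed by command ID that a leader consults to recognize commands it has already appended. A minimal, self-contained sketch of that idea follows; cmdIDKey and proposalTracker are illustrative stand-ins, not the real storagebase/storage types.

```go
package main

import "fmt"

// cmdIDKey stands in for storagebase.CmdIDKey (a string-like command ID).
type cmdIDKey string

// proposalTracker is a toy version of the remoteProposals set being removed:
// a leader records the IDs of forwarded commands it has appended so that a
// re-forwarded copy of the same command can be recognized later.
type proposalTracker struct {
	remote map[cmdIDKey]struct{}
}

// track records a forwarded command ID, allocating the set lazily.
func (t *proposalTracker) track(id cmdIDKey) {
	if t.remote == nil {
		t.remote = map[cmdIDKey]struct{}{}
	}
	t.remote[id] = struct{}{}
}

// seen reports whether the command ID was already forwarded to this leader.
func (t *proposalTracker) seen(id cmdIDKey) bool {
	_, ok := t.remote[id]
	return ok
}

func main() {
	var t proposalTracker
	t.track("cmd-1")
	fmt.Println(t.seen("cmd-1"), t.seen("cmd-2")) // true false
}
```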
@@ -883,7 +878,6 @@ func (r *Replica) cancelPendingCommandsLocked() {
 		r.cleanupFailedProposalLocked(p)
 		p.finishApplication(pr)
 	}
-	r.mu.remoteProposals = nil
 }
 
 // cleanupFailedProposalLocked cleans up after a proposal that has failed. It
@@ -1118,22 +1112,12 @@ func (r *Replica) updateProposalQuotaRaftMuLocked(
 			log.Fatalf(ctx, "len(r.mu.commandSizes) = %d, expected 0", commandSizesLen)
 		}
 
-		// We set the defaultProposalQuota to be less than RaftLogMaxSize,
-		// in doing so we ensure all replicas have sufficiently up to date
-		// logs so that when the log gets truncated, the followers do not
-		// need non-preemptive snapshots. Changing this deserves care. Too
-		// low and everything comes to a grinding halt, too high and we're
-		// not really throttling anything (we'll still generate snapshots).
-		//
-		// TODO(nvanbenschoten): clean this up in later commits.
-		proposalQuota := r.store.cfg.RaftLogMaxSize / 4
-
 		// Raft may propose commands itself (specifically the empty
 		// commands when leadership changes), and these commands don't go
 		// through the code paths where we acquire quota from the pool. To
 		// offset this we reset the quota pool whenever leadership changes
 		// hands.
-		r.mu.proposalQuota = newQuotaPool(proposalQuota)
+		r.mu.proposalQuota = newQuotaPool(r.store.cfg.RaftProposalQuota)
 		r.mu.lastUpdateTimes = make(map[roachpb.ReplicaID]time.Time)
 		r.mu.commandSizes = make(map[storagebase.CmdIDKey]int)
 	} else if r.mu.proposalQuota != nil {
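The hunk above swaps the inline RaftLogMaxSize/4 computation for a configured r.store.cfg.RaftProposalQuota when the pool is rebuilt on a leadership change. For context, a rough illustration of the quota-pool idea is below; it is a simplified, non-blocking sketch rather than the real implementation in the storage package, and the 1 MiB budget is a made-up number.

```go
package main

import (
	"fmt"
	"sync"
)

// quotaPool is a simplified stand-in for the storage package's quota pool:
// a fixed budget of bytes that proposals take from before being proposed and
// return once they are applied or abandoned.
type quotaPool struct {
	mu    sync.Mutex
	quota int64
}

func newQuotaPool(q int64) *quotaPool { return &quotaPool{quota: q} }

// tryAcquire takes n bytes from the pool if the budget allows it.
func (p *quotaPool) tryAcquire(n int64) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if n > p.quota {
		return false
	}
	p.quota -= n
	return true
}

// release returns n bytes to the pool.
func (p *quotaPool) release(n int64) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.quota += n
}

func main() {
	// The diff sizes the pool from a config value (RaftProposalQuota) instead
	// of deriving it inline from RaftLogMaxSize/4.
	const raftProposalQuota = 1 << 20 // hypothetical 1 MiB budget
	pool := newQuotaPool(raftProposalQuota)
	fmt.Println(pool.tryAcquire(512 << 10)) // true
	fmt.Println(pool.tryAcquire(768 << 10)) // false: would exceed the budget
	pool.release(512 << 10)
	fmt.Println(pool.tryAcquire(768 << 10)) // true
}
```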
@@ -1913,7 +1897,6 @@ func (r *Replica) State() storagepb.RangeInfo {
 	ri.ReplicaState = *(protoutil.Clone(&r.mu.state)).(*storagepb.ReplicaState)
 	ri.LastIndex = r.mu.lastIndex
 	ri.NumPending = uint64(len(r.mu.localProposals))
-	ri.NumRemotePending = uint64(len(r.mu.remoteProposals))
 	ri.RaftLogSize = r.mu.raftLogSize
 	ri.NumDropped = uint64(r.mu.droppedMessages)
 	if r.mu.proposalQuota != nil {
@@ -4042,20 +4025,7 @@ func (r *Replica) stepRaftGroup(req *RaftMessageRequest) error {
 		// we expect the originator to campaign instead.
 		r.unquiesceWithOptionsLocked(false /* campaignOnWake */)
 		r.refreshLastUpdateTimeForReplicaLocked(req.FromReplica.ReplicaID)
-
-		// Check if the message is a proposal that should be dropped.
-		if r.shouldDropForwardedProposalLocked(req) {
-			// If we could signal to the sender that its proposal was accepted
-			// or dropped then we wouldn't need to track anything.
-			return false /* unquiesceAndWakeLeader */, nil
-		}
-
 		err := raftGroup.Step(req.Message)
-		if err == nil {
-			// If we stepped successfully and the request is a proposal, consider
-			// tracking it so that we can ignore identical proposals in the future.
-			r.maybeTrackForwardedProposalLocked(raftGroup, req)
-		}
 		if err == raft.ErrProposalDropped {
 			// A proposal was forwarded to this replica but we couldn't propose it.
 			// Swallow the error since we don't have an effective way of signaling
@@ -4068,68 +4038,6 @@ func (r *Replica) stepRaftGroup(req *RaftMessageRequest) error {
 	})
 }
 
-func (r *Replica) shouldDropForwardedProposalLocked(req *RaftMessageRequest) bool {
-	if req.Message.Type != raftpb.MsgProp {
-		// Not a proposal.
-		return false
-	}
-
-	for _, e := range req.Message.Entries {
-		switch e.Type {
-		case raftpb.EntryNormal:
-			cmdID, _ := DecodeRaftCommand(e.Data)
-			if _, ok := r.mu.remoteProposals[cmdID]; !ok {
-				// Untracked remote proposal. Don't drop.
-				return false
-			}
-		case raftpb.EntryConfChange:
-			// Never drop EntryConfChange proposals.
-			return false
-		default:
-			log.Fatalf(context.TODO(), "unexpected Raft entry: %v", e)
-		}
-	}
-	// All entries tracked.
-	return true
-}
-
-func (r *Replica) maybeTrackForwardedProposalLocked(rg *raft.RawNode, req *RaftMessageRequest) {
-	if req.Message.Type != raftpb.MsgProp {
-		// Not a proposal.
-		return
-	}
-
-	if rg.Status().RaftState != raft.StateLeader {
-		// We're not the leader. We can't be sure that the proposal made it into
-		// the Raft log, so don't track it.
-		return
-	}
-
-	// Record that each of the proposal's entries was seen and appended. This
-	// allows us to catch duplicate forwarded proposals in the future and
-	// prevent them from being repeatedly appended to a leader's raft log.
-	for _, e := range req.Message.Entries {
-		switch e.Type {
-		case raftpb.EntryNormal:
-			cmdID, data := DecodeRaftCommand(e.Data)
-			if len(data) == 0 {
-				// An empty command is proposed to unquiesce a range and
-				// wake the leader. Don't keep track of these forwarded
-				// proposals because they will never be cleaned up.
-			} else {
-				if r.mu.remoteProposals == nil {
-					r.mu.remoteProposals = map[storagebase.CmdIDKey]struct{}{}
-				}
-				r.mu.remoteProposals[cmdID] = struct{}{}
-			}
-		case raftpb.EntryConfChange:
-			// Don't track EntryConfChanges.
-		default:
-			log.Fatalf(context.TODO(), "unexpected Raft entry: %v", e)
-		}
-	}
-}
-
 type handleRaftReadyStats struct {
 	processed int
 }
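To keep the intent of the deleted helpers visible, here is a schematic restatement of the drop rule from shouldDropForwardedProposalLocked using stand-in types instead of the raftpb API. It only sketches the decision (drop a forwarded proposal iff every normal entry in it is already tracked; never drop conf changes) and is not the removed CockroachDB code.

```go
package main

import "fmt"

// Simplified stand-ins for the raftpb entry types the removed helpers inspect.
type entryType int

const (
	entryNormal entryType = iota
	entryConfChange
)

type entry struct {
	typ   entryType
	cmdID string // the real code decodes this from Entry.Data via DecodeRaftCommand
}

// shouldDropForwardedProposal mirrors the shape of the removed helper: a
// forwarded proposal is dropped only if every normal entry in it is already
// tracked; conf changes are never dropped.
func shouldDropForwardedProposal(tracked map[string]struct{}, entries []entry) bool {
	for _, e := range entries {
		switch e.typ {
		case entryNormal:
			if _, ok := tracked[e.cmdID]; !ok {
				return false // untracked remote proposal: don't drop
			}
		case entryConfChange:
			return false // never drop conf changes
		}
	}
	return true // every entry already tracked
}

func main() {
	tracked := map[string]struct{}{"a": {}}
	fmt.Println(shouldDropForwardedProposal(tracked, []entry{{entryNormal, "a"}})) // true
	fmt.Println(shouldDropForwardedProposal(tracked, []entry{{entryNormal, "b"}})) // false
}
```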
@@ -4394,7 +4302,6 @@ func (r *Replica) handleRaftReadyRaftMuLocked(
 		r.mu.leaderID = leaderID
 		// Clear the remote proposal set. Would have been nil already if not
 		// previously the leader.
-		r.mu.remoteProposals = nil
 		becameLeader = r.mu.leaderID == r.mu.replicaID
 	}
 	r.mu.Unlock()
@@ -4599,22 +4506,13 @@ func (r *Replica) tick(livenessMap IsLiveMap) (bool, error) {
 	if knob := r.store.TestingKnobs().RefreshReasonTicksPeriod; knob > 0 {
 		refreshAtDelta = knob
 	}
-	if !r.store.TestingKnobs().DisableRefreshReasonTicks &&
-		r.mu.replicaID != r.mu.leaderID &&
-		r.mu.ticks%refreshAtDelta == 0 {
+	if !r.store.TestingKnobs().DisableRefreshReasonTicks && r.mu.ticks%refreshAtDelta == 0 {
 		// RaftElectionTimeoutTicks is a reasonable approximation of how long we
 		// should wait before deciding that our previous proposal didn't go
 		// through. Note that the combination of the above condition and passing
 		// RaftElectionTimeoutTicks to refreshProposalsLocked means that commands
 		// will be refreshed when they have been pending for 1 to 2 election
 		// cycles.
-		//
-		// However, we don't refresh proposals if we are the leader because
-		// doing so would be useless. The commands tracked by a leader replica
-		// were either all proposed when the replica was a leader or were
-		// re-proposed when the replica became a leader. Either way, they are
-		// guaranteed to be in the leader's Raft log so re-proposing won't do
-		// anything.
 		r.refreshProposalsLocked(refreshAtDelta, reasonTicks)
 	}
 	return true, nil
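After this change, refreshes are driven purely by the tick counter: the r.mu.replicaID != r.mu.leaderID guard is gone, so leaders refresh on the same schedule as followers. A tiny sketch of the modulo-based condition, with refreshAtDelta chosen arbitrarily for the example:

```go
package main

import "fmt"

// A toy tick loop showing the refresh condition after this change: pending
// proposals are refreshed every refreshAtDelta ticks regardless of whether
// this replica is currently the leader.
func main() {
	const refreshAtDelta = 5 // stands in for RaftElectionTimeoutTicks
	for ticks := 1; ticks <= 12; ticks++ {
		if ticks%refreshAtDelta == 0 {
			fmt.Printf("tick %d: refresh pending proposals (reasonTicks)\n", ticks)
		}
	}
}
```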
@@ -5407,9 +5305,6 @@ func (r *Replica) processRaftCommand(
 		delete(r.mu.localProposals, idKey)
 	}
 
-	// Delete the entry for a forwarded proposal set.
-	delete(r.mu.remoteProposals, idKey)
-
 	leaseIndex, proposalRetry, forcedErr := r.checkForcedErrLocked(ctx, idKey, raftCmd, proposal, proposedLocally)
 
 	r.mu.Unlock()