From 82abb443545af02f1d76d22848add37b7288bd49 Mon Sep 17 00:00:00 2001 From: Joe Loser Date: Fri, 29 Jun 2018 07:38:20 -0400 Subject: [PATCH 1/2] Rename data members of ConsensusParms: Based on a TODO comment in DisputedTx.h, it seems at one point the data members of ConsensusParms were macros. Now that they are not, we should spell them like other data members (without all uppercase). --- src/ripple/app/misc/NetworkOPs.cpp | 2 +- src/ripple/consensus/Consensus.cpp | 10 ++--- src/ripple/consensus/Consensus.h | 28 ++++++------- src/ripple/consensus/ConsensusParms.h | 40 +++++++++---------- src/ripple/consensus/DisputedTx.h | 18 ++++----- .../consensus/ByzantineFailureSim_test.cpp | 2 +- src/test/consensus/Consensus_test.cpp | 40 +++++++++---------- src/test/consensus/ScaleFreeSim_test.cpp | 2 +- src/test/csf/Peer.h | 4 +- 9 files changed, 70 insertions(+), 76 deletions(-) diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index d2f3107b79c..cdbce68988c 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -680,7 +680,7 @@ void NetworkOPsImp::setHeartbeatTimer () })) { heartbeatTimer_.expires_from_now ( - mConsensus.parms().ledgerGRANULARITY); + mConsensus.parms().ledgerGranularity); heartbeatTimer_.async_wait (std::move (*optionalCountedHandler)); } } diff --git a/src/ripple/consensus/Consensus.cpp b/src/ripple/consensus/Consensus.cpp index 1b08859c889..70957417921 100644 --- a/src/ripple/consensus/Consensus.cpp +++ b/src/ripple/consensus/Consensus.cpp @@ -63,7 +63,7 @@ shouldCloseLedger( } // Preserve minimum ledger open time - if (openTime < parms.ledgerMIN_CLOSE) + if (openTime < parms.ledgerMinClose) { JLOG(j.debug()) << "Must wait minimum time before closing"; return false; @@ -122,14 +122,14 @@ checkConsensus( << " time=" << currentAgreeTime.count() << "/" << previousAgreeTime.count(); - if (currentAgreeTime <= parms.ledgerMIN_CONSENSUS) + if (currentAgreeTime <= parms.ledgerMinConsensus) 
return ConsensusState::No; if (currentProposers < (prevProposers * 3 / 4)) { // Less than 3/4 of the last ledger's proposers are present; don't // rush: we may need more time. - if (currentAgreeTime < (previousAgreeTime + parms.ledgerMIN_CONSENSUS)) + if (currentAgreeTime < (previousAgreeTime + parms.ledgerMinConsensus)) { JLOG(j.trace()) << "too fast, not enough proposers"; return ConsensusState::No; @@ -139,7 +139,7 @@ checkConsensus( // Have we, together with the nodes on our UNL list, reached the threshold // to declare consensus? if (checkConsensusReached( - currentAgree, currentProposers, proposing, parms.minCONSENSUS_PCT)) + currentAgree, currentProposers, proposing, parms.minConsensusPct)) { JLOG(j.debug()) << "normal consensus"; return ConsensusState::Yes; @@ -148,7 +148,7 @@ checkConsensus( // Have sufficient nodes on our UNL list moved on and reached the threshold // to declare consensus? if (checkConsensusReached( - currentFinished, currentProposers, false, parms.minCONSENSUS_PCT)) + currentFinished, currentProposers, false, parms.minConsensusPct)) { JLOG(j.warn()) << "We see no consensus, but 80% of nodes have moved on"; return ConsensusState::MovedOn; diff --git a/src/ripple/consensus/Consensus.h b/src/ripple/consensus/Consensus.h index 9225a404870..4def7dbd06a 100644 --- a/src/ripple/consensus/Consensus.h +++ b/src/ripple/consensus/Consensus.h @@ -591,7 +591,7 @@ Consensus::startRound( if (firstRound_) { // take our initial view of closeTime_ from the seed ledger - prevRoundTime_ = adaptor_.parms().ledgerIDLE_INTERVAL; + prevRoundTime_ = adaptor_.parms().ledgerIdleInterval; prevCloseTime_ = prevLedger.closeTime(); firstRound_ = false; } @@ -1087,7 +1087,7 @@ Consensus::phaseOpen() } auto const idleInterval = std::max( - adaptor_.parms().ledgerIDLE_INTERVAL, + adaptor_.parms().ledgerIdleInterval, 2 * previousLedger_.closeTimeResolution()); // Decide if we should close the ledger @@ -1121,10 +1121,10 @@ Consensus::phaseEstablish() result_->proposers = 
currPeerPositions_.size(); convergePercent_ = result_->roundTime.read() * 100 / - std::max(prevRoundTime_, parms.avMIN_CONSENSUS_TIME); + std::max(prevRoundTime_, parms.avMinConsensusTime); // Give everyone a chance to take an initial position - if (result_->roundTime.read() < parms.ledgerMIN_CONSENSUS) + if (result_->roundTime.read() < parms.ledgerMinConsensus) return; updateOurPositions(); @@ -1214,8 +1214,8 @@ Consensus::updateOurPositions() ConsensusParms const & parms = adaptor_.parms(); // Compute a cutoff time - auto const peerCutoff = now_ - parms.proposeFRESHNESS; - auto const ourCutoff = now_ - parms.proposeINTERVAL; + auto const peerCutoff = now_ - parms.proposeFreshness; + auto const ourCutoff = now_ - parms.proposeInterval; // Verify freshness of peer positions and compute close times std::map closeTimeVotes; @@ -1290,14 +1290,14 @@ Consensus::updateOurPositions() { int neededWeight; - if (convergePercent_ < parms.avMID_CONSENSUS_TIME) - neededWeight = parms.avINIT_CONSENSUS_PCT; - else if (convergePercent_ < parms.avLATE_CONSENSUS_TIME) - neededWeight = parms.avMID_CONSENSUS_PCT; - else if (convergePercent_ < parms.avSTUCK_CONSENSUS_TIME) - neededWeight = parms.avLATE_CONSENSUS_PCT; + if (convergePercent_ < parms.avMidConsensusTime) + neededWeight = parms.avInitConsensusPct; + else if (convergePercent_ < parms.avLateConsensusTime) + neededWeight = parms.avMidConsensusPct; + else if (convergePercent_ < parms.avStuckConsensusTime) + neededWeight = parms.avLateConsensusPct; else - neededWeight = parms.avSTUCK_CONSENSUS_PCT; + neededWeight = parms.avStuckConsensusPct; int participants = currPeerPositions_.size(); if (mode_.get() == ConsensusMode::proposing) @@ -1311,7 +1311,7 @@ Consensus::updateOurPositions() // Threshold to declare consensus int const threshConsensus = - participantsNeeded(participants, parms.avCT_CONSENSUS_PCT); + participantsNeeded(participants, parms.avCtConsensusPct); JLOG(j_.info()) << "Proposers:" << currPeerPositions_.size() << " 
nw:" << neededWeight << " thrV:" << threshVote diff --git a/src/ripple/consensus/ConsensusParms.h b/src/ripple/consensus/ConsensusParms.h index e82577d0196..ea48af0f438 100644 --- a/src/ripple/consensus/ConsensusParms.h +++ b/src/ripple/consensus/ConsensusParms.h @@ -45,7 +45,7 @@ struct ConsensusParms This is a safety to protect against very old validations and the time it takes to adjust the close time accuracy window. */ - std::chrono::seconds validationVALID_WALL = 5min; + std::chrono::seconds validationValidWall = 5min; /** Duration a validation remains current after first observed. @@ -53,41 +53,39 @@ struct ConsensusParms first saw it. This provides faster recovery in very rare cases where the number of validations produced by the network is lower than normal */ - std::chrono::seconds validationVALID_LOCAL = 3min; + std::chrono::seconds validationValidLocal = 3min; /** Duration pre-close in which validations are acceptable. The number of seconds before a close time that we consider a validation acceptable. This protects against extreme clock errors */ - std::chrono::seconds validationVALID_EARLY = 3min; - + std::chrono::seconds validationValidEarly = 3min; //! How long we consider a proposal fresh - std::chrono::seconds proposeFRESHNESS = 20s; + std::chrono::seconds proposeFreshness = 20s; //! How often we force generating a new proposal to keep ours fresh - std::chrono::seconds proposeINTERVAL = 12s; - + std::chrono::seconds proposeInterval = 12s; //------------------------------------------------------------------------- // Consensus durations are relative to the internal Consenus clock and use // millisecond resolution. //! The percentage threshold above which we can declare consensus. - std::size_t minCONSENSUS_PCT = 80; + std::size_t minConsensusPct = 80; //! The duration a ledger may remain idle before closing - std::chrono::milliseconds ledgerIDLE_INTERVAL = 15s; + std::chrono::milliseconds ledgerIdleInterval = 15s; //! 
The number of seconds we wait minimum to ensure participation - std::chrono::milliseconds ledgerMIN_CONSENSUS = 1950ms; + std::chrono::milliseconds ledgerMinConsensus = 1950ms; //! Minimum number of seconds to wait to ensure others have computed the LCL - std::chrono::milliseconds ledgerMIN_CLOSE = 2s; + std::chrono::milliseconds ledgerMinClose = 2s; //! How often we check state or change positions - std::chrono::milliseconds ledgerGRANULARITY = 1s; + std::chrono::milliseconds ledgerGranularity = 1s; /** The minimum amount of time to consider the previous round to have taken. @@ -99,7 +97,7 @@ struct ConsensusParms twice the interval between proposals (0.7s) divided by the interval between mid and late consensus ([85-50]/100). */ - std::chrono::milliseconds avMIN_CONSENSUS_TIME = 5s; + std::chrono::milliseconds avMinConsensusTime = 5s; //------------------------------------------------------------------------------ // Avalanche tuning @@ -108,28 +106,28 @@ struct ConsensusParms // position. //! Percentage of nodes on our UNL that must vote yes - std::size_t avINIT_CONSENSUS_PCT = 50; + std::size_t avInitConsensusPct = 50; //! Percentage of previous round duration before we advance - std::size_t avMID_CONSENSUS_TIME = 50; + std::size_t avMidConsensusTime = 50; //! Percentage of nodes that most vote yes after advancing - std::size_t avMID_CONSENSUS_PCT = 65; + std::size_t avMidConsensusPct = 65; //! Percentage of previous round duration before we advance - std::size_t avLATE_CONSENSUS_TIME = 85; + std::size_t avLateConsensusTime = 85; //! Percentage of nodes that most vote yes after advancing - std::size_t avLATE_CONSENSUS_PCT = 70; + std::size_t avLateConsensusPct = 70; //! Percentage of previous round duration before we are stuck - std::size_t avSTUCK_CONSENSUS_TIME = 200; + std::size_t avStuckConsensusTime = 200; //! Percentage of nodes that must vote yes after we are stuck - std::size_t avSTUCK_CONSENSUS_PCT = 95; + std::size_t avStuckConsensusPct = 95; //! 
Percentage of nodes required to reach agreement on ledger close time - std::size_t avCT_CONSENSUS_PCT = 75; + std::size_t avCtConsensusPct = 75; //-------------------------------------------------------------------------- diff --git a/src/ripple/consensus/DisputedTx.h b/src/ripple/consensus/DisputedTx.h index 93612e58184..05ce1d16013 100644 --- a/src/ripple/consensus/DisputedTx.h +++ b/src/ripple/consensus/DisputedTx.h @@ -213,20 +213,16 @@ DisputedTx::updateVote( // This is basically the percentage of nodes voting 'yes' (including us) weight = (yays_ * 100 + (ourVote_ ? 100 : 0)) / (nays_ + yays_ + 1); - // VFALCO TODO Rename these macros and turn them into language - // constructs. consolidate them into a class that collects - // all these related values. - // // To prevent avalanche stalls, we increase the needed weight slightly // over time. - if (percentTime < p.avMID_CONSENSUS_TIME) - newPosition = weight > p.avINIT_CONSENSUS_PCT; - else if (percentTime < p.avLATE_CONSENSUS_TIME) - newPosition = weight > p.avMID_CONSENSUS_PCT; - else if (percentTime < p.avSTUCK_CONSENSUS_TIME) - newPosition = weight > p.avLATE_CONSENSUS_PCT; + if (percentTime < p.avMidConsensusTime) + newPosition = weight > p.avInitConsensusPct; + else if (percentTime < p.avLateConsensusTime) + newPosition = weight > p.avMidConsensusPct; + else if (percentTime < p.avStuckConsensusTime) + newPosition = weight > p.avLateConsensusPct; else - newPosition = weight > p.avSTUCK_CONSENSUS_PCT; + newPosition = weight > p.avStuckConsensusPct; } else { diff --git a/src/test/consensus/ByzantineFailureSim_test.cpp b/src/test/consensus/ByzantineFailureSim_test.cpp index ae547db9f88..73393cde209 100644 --- a/src/test/consensus/ByzantineFailureSim_test.cpp +++ b/src/test/consensus/ByzantineFailureSim_test.cpp @@ -40,7 +40,7 @@ class ByzantineFailureSim_test : public beast::unit_test::suite ConsensusParms const parms{}; SimDuration const delay = - date::round(0.2 * parms.ledgerGRANULARITY); + date::round(0.2 
* parms.ledgerGranularity); PeerGroup a = sim.createGroup(1); PeerGroup b = sim.createGroup(1); PeerGroup c = sim.createGroup(1); diff --git a/src/test/consensus/Consensus_test.cpp b/src/test/consensus/Consensus_test.cpp index 70b75ec3973..112d0c52d68 100644 --- a/src/test/consensus/Consensus_test.cpp +++ b/src/test/consensus/Consensus_test.cpp @@ -146,7 +146,7 @@ class Consensus_test : public beast::unit_test::suite // Connected trust and network graphs with single fixed delay peers.trustAndConnect( - peers, date::round(0.2 * parms.ledgerGRANULARITY)); + peers, date::round(0.2 * parms.ledgerGranularity)); // everyone submits their own ID as a TX for (Peer * p : peers) @@ -194,11 +194,11 @@ class Consensus_test : public beast::unit_test::suite // Fast and slow network connections fast.connect( - fast, date::round(0.2 * parms.ledgerGRANULARITY)); + fast, date::round(0.2 * parms.ledgerGranularity)); slow.connect( network, - date::round(1.1 * parms.ledgerGRANULARITY)); + date::round(1.1 * parms.ledgerGranularity)); // All peers submit their own ID as a transaction for (Peer* peer : network) @@ -252,11 +252,11 @@ class Consensus_test : public beast::unit_test::suite // Fast and slow network connections fast.connect( fast, - date::round(0.2 * parms.ledgerGRANULARITY)); + date::round(0.2 * parms.ledgerGranularity)); slow.connect( network, - date::round(1.1 * parms.ledgerGRANULARITY)); + date::round(1.1 * parms.ledgerGranularity)); for (Peer* peer : slow) peer->runAsValidator = isParticipant; @@ -380,20 +380,20 @@ class Consensus_test : public beast::unit_test::suite network.trust(network); network.connect( - network, date::round(0.2 * parms.ledgerGRANULARITY)); + network, date::round(0.2 * parms.ledgerGranularity)); // Run consensus without skew until we have a short close time // resolution Peer* firstPeer = *groupA.begin(); while (firstPeer->lastClosedLedger.closeTimeResolution() >= - parms.proposeFRESHNESS) + parms.proposeFreshness) sim.run(1); // Introduce a shift on 
the time of 2/3 of peers for (Peer* peer : groupA) - peer->clockSkew = parms.proposeFRESHNESS / 2; + peer->clockSkew = parms.proposeFreshness / 2; for (Peer* peer : groupB) - peer->clockSkew = parms.proposeFRESHNESS; + peer->clockSkew = parms.proposeFreshness; sim.run(1); @@ -417,7 +417,7 @@ class Consensus_test : public beast::unit_test::suite // Vary the time it takes to process validations to exercise detecting // the wrong LCL at different phases of consensus - for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE}) + for (auto validationDelay : {0ms, parms.ledgerMinClose}) { // Consider 10 peers: // 0 1 2 3 4 5 6 7 8 9 @@ -450,14 +450,14 @@ class Consensus_test : public beast::unit_test::suite PeerGroup network = minority + majority; SimDuration delay = - date::round(0.2 * parms.ledgerGRANULARITY); + date::round(0.2 * parms.ledgerGranularity); minority.trustAndConnect(minority + majorityA, delay); majority.trustAndConnect(majority, delay); CollectByNode jumps; sim.collectors.add(jumps); - BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.)); + BEAST_EXPECT(sim.trustGraph.canFork(parms.minConsensusPct / 100.)); // initial round to set prior state sim.run(1); @@ -556,7 +556,7 @@ class Consensus_test : public beast::unit_test::suite PeerGroup network = loner + clique; network.connect( network, - date::round(0.2 * parms.ledgerGRANULARITY)); + date::round(0.2 * parms.ledgerGranularity)); // initial round to set prior state sim.run(1); @@ -567,7 +567,7 @@ class Consensus_test : public beast::unit_test::suite // Delay validation processing for (Peer* peer : network) - peer->delays.recvValidation = parms.ledgerGRANULARITY; + peer->delays.recvValidation = parms.ledgerGranularity; // additional rounds to generate wrongLCL and recover sim.run(2); @@ -610,10 +610,10 @@ class Consensus_test : public beast::unit_test::suite // Fast and slow network connections fast.connect( - fast, date::round(0.2 * parms.ledgerGRANULARITY)); + fast, date::round(0.2 * 
parms.ledgerGranularity)); slow.connect( network, - date::round(1.1 * parms.ledgerGRANULARITY)); + date::round(1.1 * parms.ledgerGranularity)); // Run to the ledger *prior* to decreasing the resolution sim.run(increaseLedgerTimeResolutionEvery - 2); @@ -762,7 +762,7 @@ class Consensus_test : public beast::unit_test::suite PeerGroup network = a + b; SimDuration delay = - date::round(0.2 * parms.ledgerGRANULARITY); + date::round(0.2 * parms.ledgerGranularity); a.trustAndConnect(a, delay); b.trustAndConnect(b, delay); @@ -809,7 +809,7 @@ class Consensus_test : public beast::unit_test::suite center.trust(validators); SimDuration delay = - date::round(0.2 * parms.ledgerGRANULARITY); + date::round(0.2 * parms.ledgerGranularity); validators.connect(center, delay); center[0]->runAsValidator = false; @@ -934,9 +934,9 @@ class Consensus_test : public beast::unit_test::suite PeerGroup network = groupABD + groupCsplit + groupCfast; SimDuration delay = date::round( - 0.2 * parms.ledgerGRANULARITY); + 0.2 * parms.ledgerGranularity); SimDuration fDelay = date::round( - 0.1 * parms.ledgerGRANULARITY); + 0.1 * parms.ledgerGranularity); network.trust(network); // C must have a shorter delay to see all the validations before the diff --git a/src/test/consensus/ScaleFreeSim_test.cpp b/src/test/consensus/ScaleFreeSim_test.cpp index 1163a28f80a..0d398f62b1e 100644 --- a/src/test/consensus/ScaleFreeSim_test.cpp +++ b/src/test/consensus/ScaleFreeSim_test.cpp @@ -57,7 +57,7 @@ class ScaleFreeSim_test : public beast::unit_test::suite // nodes with a trust line in either direction are network-connected network.connectFromTrust( - date::round(0.2 * parms.ledgerGRANULARITY)); + date::round(0.2 * parms.ledgerGranularity)); // Initialize collectors to track statistics to report TxCollector txCollector; diff --git a/src/test/csf/Peer.h b/src/test/csf/Peer.h index c536b25fa30..4e4a8de6dae 100644 --- a/src/test/csf/Peer.h +++ b/src/test/csf/Peer.h @@ -858,7 +858,7 @@ struct Peer 
consensus.timerEntry(now()); // only reschedule if not completed if (completedLedgers < targetLedgers) - scheduler.in(parms().ledgerGRANULARITY, [this]() { timerEntry(); }); + scheduler.in(parms().ledgerGranularity, [this]() { timerEntry(); }); } // Called to begin the next round @@ -888,7 +888,7 @@ struct Peer { // TODO: Expire validations less frequently? validations.expire(); - scheduler.in(parms().ledgerGRANULARITY, [&]() { timerEntry(); }); + scheduler.in(parms().ledgerGranularity, [&]() { timerEntry(); }); startRound(); } From ab65c472e5342ff546f018ddbe34fc341ea300c0 Mon Sep 17 00:00:00 2001 From: Joe Loser Date: Thu, 5 Jul 2018 20:55:35 -0400 Subject: [PATCH 2/2] [FOLD] Just remove the TODO comment --- src/ripple/app/misc/NetworkOPs.cpp | 2 +- src/ripple/consensus/Consensus.cpp | 10 ++--- src/ripple/consensus/Consensus.h | 28 ++++++------- src/ripple/consensus/ConsensusParms.h | 40 ++++++++++--------- src/ripple/consensus/DisputedTx.h | 14 +++---- .../consensus/ByzantineFailureSim_test.cpp | 2 +- src/test/consensus/Consensus_test.cpp | 40 +++++++++---------- src/test/consensus/ScaleFreeSim_test.cpp | 2 +- src/test/csf/Peer.h | 4 +- 9 files changed, 72 insertions(+), 70 deletions(-) diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index cdbce68988c..d2f3107b79c 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -680,7 +680,7 @@ void NetworkOPsImp::setHeartbeatTimer () })) { heartbeatTimer_.expires_from_now ( - mConsensus.parms().ledgerGranularity); + mConsensus.parms().ledgerGRANULARITY); heartbeatTimer_.async_wait (std::move (*optionalCountedHandler)); } } diff --git a/src/ripple/consensus/Consensus.cpp b/src/ripple/consensus/Consensus.cpp index 70957417921..1b08859c889 100644 --- a/src/ripple/consensus/Consensus.cpp +++ b/src/ripple/consensus/Consensus.cpp @@ -63,7 +63,7 @@ shouldCloseLedger( } // Preserve minimum ledger open time - if (openTime < parms.ledgerMinClose) + if 
(openTime < parms.ledgerMIN_CLOSE) { JLOG(j.debug()) << "Must wait minimum time before closing"; return false; @@ -122,14 +122,14 @@ checkConsensus( << " time=" << currentAgreeTime.count() << "/" << previousAgreeTime.count(); - if (currentAgreeTime <= parms.ledgerMinConsensus) + if (currentAgreeTime <= parms.ledgerMIN_CONSENSUS) return ConsensusState::No; if (currentProposers < (prevProposers * 3 / 4)) { // Less than 3/4 of the last ledger's proposers are present; don't // rush: we may need more time. - if (currentAgreeTime < (previousAgreeTime + parms.ledgerMinConsensus)) + if (currentAgreeTime < (previousAgreeTime + parms.ledgerMIN_CONSENSUS)) { JLOG(j.trace()) << "too fast, not enough proposers"; return ConsensusState::No; @@ -139,7 +139,7 @@ checkConsensus( // Have we, together with the nodes on our UNL list, reached the threshold // to declare consensus? if (checkConsensusReached( - currentAgree, currentProposers, proposing, parms.minConsensusPct)) + currentAgree, currentProposers, proposing, parms.minCONSENSUS_PCT)) { JLOG(j.debug()) << "normal consensus"; return ConsensusState::Yes; @@ -148,7 +148,7 @@ checkConsensus( // Have sufficient nodes on our UNL list moved on and reached the threshold // to declare consensus? 
if (checkConsensusReached( - currentFinished, currentProposers, false, parms.minConsensusPct)) + currentFinished, currentProposers, false, parms.minCONSENSUS_PCT)) { JLOG(j.warn()) << "We see no consensus, but 80% of nodes have moved on"; return ConsensusState::MovedOn; diff --git a/src/ripple/consensus/Consensus.h b/src/ripple/consensus/Consensus.h index 4def7dbd06a..9225a404870 100644 --- a/src/ripple/consensus/Consensus.h +++ b/src/ripple/consensus/Consensus.h @@ -591,7 +591,7 @@ Consensus::startRound( if (firstRound_) { // take our initial view of closeTime_ from the seed ledger - prevRoundTime_ = adaptor_.parms().ledgerIdleInterval; + prevRoundTime_ = adaptor_.parms().ledgerIDLE_INTERVAL; prevCloseTime_ = prevLedger.closeTime(); firstRound_ = false; } @@ -1087,7 +1087,7 @@ Consensus::phaseOpen() } auto const idleInterval = std::max( - adaptor_.parms().ledgerIdleInterval, + adaptor_.parms().ledgerIDLE_INTERVAL, 2 * previousLedger_.closeTimeResolution()); // Decide if we should close the ledger @@ -1121,10 +1121,10 @@ Consensus::phaseEstablish() result_->proposers = currPeerPositions_.size(); convergePercent_ = result_->roundTime.read() * 100 / - std::max(prevRoundTime_, parms.avMinConsensusTime); + std::max(prevRoundTime_, parms.avMIN_CONSENSUS_TIME); // Give everyone a chance to take an initial position - if (result_->roundTime.read() < parms.ledgerMinConsensus) + if (result_->roundTime.read() < parms.ledgerMIN_CONSENSUS) return; updateOurPositions(); @@ -1214,8 +1214,8 @@ Consensus::updateOurPositions() ConsensusParms const & parms = adaptor_.parms(); // Compute a cutoff time - auto const peerCutoff = now_ - parms.proposeFreshness; - auto const ourCutoff = now_ - parms.proposeInterval; + auto const peerCutoff = now_ - parms.proposeFRESHNESS; + auto const ourCutoff = now_ - parms.proposeINTERVAL; // Verify freshness of peer positions and compute close times std::map closeTimeVotes; @@ -1290,14 +1290,14 @@ Consensus::updateOurPositions() { int neededWeight; - 
if (convergePercent_ < parms.avMidConsensusTime) - neededWeight = parms.avInitConsensusPct; - else if (convergePercent_ < parms.avLateConsensusTime) - neededWeight = parms.avMidConsensusPct; - else if (convergePercent_ < parms.avStuckConsensusTime) - neededWeight = parms.avLateConsensusPct; + if (convergePercent_ < parms.avMID_CONSENSUS_TIME) + neededWeight = parms.avINIT_CONSENSUS_PCT; + else if (convergePercent_ < parms.avLATE_CONSENSUS_TIME) + neededWeight = parms.avMID_CONSENSUS_PCT; + else if (convergePercent_ < parms.avSTUCK_CONSENSUS_TIME) + neededWeight = parms.avLATE_CONSENSUS_PCT; else - neededWeight = parms.avStuckConsensusPct; + neededWeight = parms.avSTUCK_CONSENSUS_PCT; int participants = currPeerPositions_.size(); if (mode_.get() == ConsensusMode::proposing) @@ -1311,7 +1311,7 @@ Consensus::updateOurPositions() // Threshold to declare consensus int const threshConsensus = - participantsNeeded(participants, parms.avCtConsensusPct); + participantsNeeded(participants, parms.avCT_CONSENSUS_PCT); JLOG(j_.info()) << "Proposers:" << currPeerPositions_.size() << " nw:" << neededWeight << " thrV:" << threshVote diff --git a/src/ripple/consensus/ConsensusParms.h b/src/ripple/consensus/ConsensusParms.h index ea48af0f438..e82577d0196 100644 --- a/src/ripple/consensus/ConsensusParms.h +++ b/src/ripple/consensus/ConsensusParms.h @@ -45,7 +45,7 @@ struct ConsensusParms This is a safety to protect against very old validations and the time it takes to adjust the close time accuracy window. */ - std::chrono::seconds validationValidWall = 5min; + std::chrono::seconds validationVALID_WALL = 5min; /** Duration a validation remains current after first observed. @@ -53,39 +53,41 @@ struct ConsensusParms first saw it. 
         This provides faster recovery in very rare cases where the number
         of validations produced by the network is lower than normal
     */
-    std::chrono::seconds validationVALID_LOCAL = 3min;
+    std::chrono::seconds validationValidLocal = 3min;
 
     /** Duration pre-close in which validations are acceptable.
 
         The number of seconds before a close time that we consider a
         validation acceptable. This protects against extreme clock errors
     */
-    std::chrono::seconds validationVALID_EARLY = 3min;
-
+    std::chrono::seconds validationValidEarly = 3min;
     //! How long we consider a proposal fresh
-    std::chrono::seconds proposeFRESHNESS = 20s;
+    std::chrono::seconds proposeFreshness = 20s;
 
     //! How often we force generating a new proposal to keep ours fresh
-    std::chrono::seconds proposeINTERVAL = 12s;
-
+    std::chrono::seconds proposeInterval = 12s;
     //-------------------------------------------------------------------------
     // Consensus durations are relative to the internal Consenus clock and use
     // millisecond resolution.
 
     //! The percentage threshold above which we can declare consensus.
-    std::size_t minCONSENSUS_PCT = 80;
+    std::size_t minConsensusPct = 80;
 
     //! The duration a ledger may remain idle before closing
-    std::chrono::milliseconds ledgerIDLE_INTERVAL = 15s;
+    std::chrono::milliseconds ledgerIdleInterval = 15s;
 
     //! The number of seconds we wait minimum to ensure participation
-    std::chrono::milliseconds ledgerMIN_CONSENSUS = 1950ms;
+    std::chrono::milliseconds ledgerMinConsensus = 1950ms;
 
     //! Minimum number of seconds to wait to ensure others have computed the LCL
-    std::chrono::milliseconds ledgerMIN_CLOSE = 2s;
+    std::chrono::milliseconds ledgerMinClose = 2s;
 
     //! How often we check state or change positions
-    std::chrono::milliseconds ledgerGRANULARITY = 1s;
+    std::chrono::milliseconds ledgerGranularity = 1s;
 
     /** The minimum amount of time to consider the previous round
         to have taken.
@@ -99,7 +97,7 @@ struct ConsensusParms
         twice the interval between proposals (0.7s) divided by the interval
         between mid and late consensus ([85-50]/100).
     */
-    std::chrono::milliseconds avMIN_CONSENSUS_TIME = 5s;
+    std::chrono::milliseconds avMinConsensusTime = 5s;
 
     //------------------------------------------------------------------------------
     // Avalanche tuning
@@ -108,28 +106,28 @@ struct ConsensusParms
     // position.
 
     //! Percentage of nodes on our UNL that must vote yes
-    std::size_t avINIT_CONSENSUS_PCT = 50;
+    std::size_t avInitConsensusPct = 50;
 
     //! Percentage of previous round duration before we advance
-    std::size_t avMID_CONSENSUS_TIME = 50;
+    std::size_t avMidConsensusTime = 50;
 
     //! Percentage of nodes that most vote yes after advancing
-    std::size_t avMID_CONSENSUS_PCT = 65;
+    std::size_t avMidConsensusPct = 65;
 
     //! Percentage of previous round duration before we advance
-    std::size_t avLATE_CONSENSUS_TIME = 85;
+    std::size_t avLateConsensusTime = 85;
 
     //! Percentage of nodes that most vote yes after advancing
-    std::size_t avLATE_CONSENSUS_PCT = 70;
+    std::size_t avLateConsensusPct = 70;
 
     //! Percentage of previous round duration before we are stuck
-    std::size_t avSTUCK_CONSENSUS_TIME = 200;
+    std::size_t avStuckConsensusTime = 200;
 
     //! Percentage of nodes that must vote yes after we are stuck
-    std::size_t avSTUCK_CONSENSUS_PCT = 95;
+    std::size_t avStuckConsensusPct = 95;
 
     //! Percentage of nodes required to reach agreement on ledger close time
-    std::size_t avCT_CONSENSUS_PCT = 75;
+    std::size_t avCtConsensusPct = 75;
 
     //--------------------------------------------------------------------------
 
diff --git a/src/ripple/consensus/DisputedTx.h b/src/ripple/consensus/DisputedTx.h
index adfa218dfaf..05ce1d16013 100644
--- a/src/ripple/consensus/DisputedTx.h
+++ b/src/ripple/consensus/DisputedTx.h
@@ -215,14 +215,14 @@ DisputedTx<Tx_t, NodeID_t>::updateVote(
         // To prevent avalanche stalls, we increase the needed weight slightly
         // over time.
-        if (percentTime < p.avMID_CONSENSUS_TIME)
-            newPosition = weight > p.avINIT_CONSENSUS_PCT;
-        else if (percentTime < p.avLATE_CONSENSUS_TIME)
-            newPosition = weight > p.avMID_CONSENSUS_PCT;
-        else if (percentTime < p.avSTUCK_CONSENSUS_TIME)
-            newPosition = weight > p.avLATE_CONSENSUS_PCT;
+        if (percentTime < p.avMidConsensusTime)
+            newPosition = weight > p.avInitConsensusPct;
+        else if (percentTime < p.avLateConsensusTime)
+            newPosition = weight > p.avMidConsensusPct;
+        else if (percentTime < p.avStuckConsensusTime)
+            newPosition = weight > p.avLateConsensusPct;
         else
-            newPosition = weight > p.avSTUCK_CONSENSUS_PCT;
+            newPosition = weight > p.avStuckConsensusPct;
     }
     else
     {
diff --git a/src/test/consensus/ByzantineFailureSim_test.cpp b/src/test/consensus/ByzantineFailureSim_test.cpp
index ae547db9f88..73393cde209 100644
--- a/src/test/consensus/ByzantineFailureSim_test.cpp
+++ b/src/test/consensus/ByzantineFailureSim_test.cpp
@@ -40,7 +40,7 @@ class ByzantineFailureSim_test : public beast::unit_test::suite
         ConsensusParms const parms{};
         SimDuration const delay =
-            date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY);
+            date::round<SimDuration>(0.2 * parms.ledgerGranularity);
         PeerGroup a = sim.createGroup(1);
         PeerGroup b = sim.createGroup(1);
         PeerGroup c = sim.createGroup(1);
diff --git a/src/test/consensus/Consensus_test.cpp b/src/test/consensus/Consensus_test.cpp
index 70b75ec3973..112d0c52d68 100644
--- a/src/test/consensus/Consensus_test.cpp
+++ b/src/test/consensus/Consensus_test.cpp
@@ -146,7 +146,7 @@ class Consensus_test : public beast::unit_test::suite
         // Connected trust and network graphs with single fixed delay
         peers.trustAndConnect(
-            peers, date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY));
+            peers, date::round<SimDuration>(0.2 * parms.ledgerGranularity));
 
         // everyone submits their own ID as a TX
         for (Peer * p : peers)
@@ -194,11 +194,11 @@ class Consensus_test : public beast::unit_test::suite
         // Fast and slow network connections
         fast.connect(
-            fast, date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY));
+            fast, date::round<SimDuration>(0.2 * parms.ledgerGranularity));
         slow.connect(
             network,
-            date::round<SimDuration>(1.1 * parms.ledgerGRANULARITY));
+            date::round<SimDuration>(1.1 * parms.ledgerGranularity));
 
         // All peers submit their own ID as a transaction
         for (Peer* peer : network)
@@ -252,11 +252,11 @@ class Consensus_test : public beast::unit_test::suite
         // Fast and slow network connections
         fast.connect(
             fast,
-            date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY));
+            date::round<SimDuration>(0.2 * parms.ledgerGranularity));
         slow.connect(
             network,
-            date::round<SimDuration>(1.1 * parms.ledgerGRANULARITY));
+            date::round<SimDuration>(1.1 * parms.ledgerGranularity));
 
         for (Peer* peer : slow)
             peer->runAsValidator = isParticipant;
@@ -380,20 +380,20 @@ class Consensus_test : public beast::unit_test::suite
         network.trust(network);
         network.connect(
-            network, date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY));
+            network, date::round<SimDuration>(0.2 * parms.ledgerGranularity));
 
         // Run consensus without skew until we have a short close time
         // resolution
         Peer* firstPeer = *groupA.begin();
         while (firstPeer->lastClosedLedger.closeTimeResolution() >=
-               parms.proposeFRESHNESS)
+               parms.proposeFreshness)
             sim.run(1);
 
         // Introduce a shift on the time of 2/3 of peers
         for (Peer* peer : groupA)
-            peer->clockSkew = parms.proposeFRESHNESS / 2;
+            peer->clockSkew = parms.proposeFreshness / 2;
         for (Peer* peer : groupB)
-            peer->clockSkew = parms.proposeFRESHNESS;
+            peer->clockSkew = parms.proposeFreshness;
 
         sim.run(1);
@@ -417,7 +417,7 @@ class Consensus_test : public beast::unit_test::suite
         // Vary the time it takes to process validations to exercise detecting
         // the wrong LCL at different phases of consensus
-        for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
+        for (auto validationDelay : {0ms, parms.ledgerMinClose})
         {
             // Consider 10 peers:
             // 0 1 2 3 4 5 6 7 8 9
@@ -450,14 +450,14 @@ class Consensus_test : public beast::unit_test::suite
             PeerGroup network = minority + majority;
 
             SimDuration delay =
-                date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY);
+                date::round<SimDuration>(0.2 * parms.ledgerGranularity);
             minority.trustAndConnect(minority + majorityA, delay);
             majority.trustAndConnect(majority, delay);
 
             CollectByNode jumps;
             sim.collectors.add(jumps);
 
-            BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
+            BEAST_EXPECT(sim.trustGraph.canFork(parms.minConsensusPct / 100.));
 
             // initial round to set prior state
             sim.run(1);
@@ -556,7 +556,7 @@ class Consensus_test : public beast::unit_test::suite
         PeerGroup network = loner + clique;
         network.connect(
             network,
-            date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY));
+            date::round<SimDuration>(0.2 * parms.ledgerGranularity));
 
         // initial round to set prior state
         sim.run(1);
@@ -567,7 +567,7 @@ class Consensus_test : public beast::unit_test::suite
         // Delay validation processing
         for (Peer* peer : network)
-            peer->delays.recvValidation = parms.ledgerGRANULARITY;
+            peer->delays.recvValidation = parms.ledgerGranularity;
 
         // additional rounds to generate wrongLCL and recover
         sim.run(2);
@@ -610,10 +610,10 @@ class Consensus_test : public beast::unit_test::suite
         // Fast and slow network connections
         fast.connect(
-            fast, date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY));
+            fast, date::round<SimDuration>(0.2 * parms.ledgerGranularity));
         slow.connect(
             network,
-            date::round<SimDuration>(1.1 * parms.ledgerGRANULARITY));
+            date::round<SimDuration>(1.1 * parms.ledgerGranularity));
 
         // Run to the ledger *prior* to decreasing the resolution
         sim.run(increaseLedgerTimeResolutionEvery - 2);
@@ -762,7 +762,7 @@ class Consensus_test : public beast::unit_test::suite
         PeerGroup network = a + b;
         SimDuration delay =
-            date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY);
+            date::round<SimDuration>(0.2 * parms.ledgerGranularity);
         a.trustAndConnect(a, delay);
         b.trustAndConnect(b, delay);
@@ -809,7 +809,7 @@ class Consensus_test : public beast::unit_test::suite
         center.trust(validators);
         SimDuration delay =
-            date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY);
+            date::round<SimDuration>(0.2 * parms.ledgerGranularity);
         validators.connect(center, delay);
         center[0]->runAsValidator = false;
@@ -934,9 +934,9 @@ class Consensus_test : public beast::unit_test::suite
         PeerGroup network = groupABD + groupCsplit + groupCfast;
         SimDuration delay = date::round<SimDuration>(
-            0.2 * parms.ledgerGRANULARITY);
+            0.2 * parms.ledgerGranularity);
         SimDuration fDelay = date::round<SimDuration>(
-            0.1 * parms.ledgerGRANULARITY);
+            0.1 * parms.ledgerGranularity);
 
         network.trust(network);
         // C must have a shorter delay to see all the validations before the
diff --git a/src/test/consensus/ScaleFreeSim_test.cpp b/src/test/consensus/ScaleFreeSim_test.cpp
index 1163a28f80a..0d398f62b1e 100644
--- a/src/test/consensus/ScaleFreeSim_test.cpp
+++ b/src/test/consensus/ScaleFreeSim_test.cpp
@@ -57,7 +57,7 @@ class ScaleFreeSim_test : public beast::unit_test::suite
         // nodes with a trust line in either direction are network-connected
         network.connectFromTrust(
-            date::round<SimDuration>(0.2 * parms.ledgerGRANULARITY));
+            date::round<SimDuration>(0.2 * parms.ledgerGranularity));
 
         // Initialize collectors to track statistics to report
         TxCollector txCollector;
diff --git a/src/test/csf/Peer.h b/src/test/csf/Peer.h
index c536b25fa30..4e4a8de6dae 100644
--- a/src/test/csf/Peer.h
+++ b/src/test/csf/Peer.h
@@ -858,7 +858,7 @@ struct Peer
         consensus.timerEntry(now());
         // only reschedule if not completed
         if (completedLedgers < targetLedgers)
-            scheduler.in(parms().ledgerGRANULARITY, [this]() { timerEntry(); });
+            scheduler.in(parms().ledgerGranularity, [this]() { timerEntry(); });
     }
 
     // Called to begin the next round
@@ -888,7 +888,7 @@ struct Peer
     {
         // TODO: Expire validations less frequently?
         validations.expire();
-        scheduler.in(parms().ledgerGRANULARITY, [&]() { timerEntry(); });
+        scheduler.in(parms().ledgerGranularity, [&]() { timerEntry(); });
         startRound();
     }