diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index af7c7ffb003..adcae564f56 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -1099,7 +1099,7 @@ mod tests {
 		SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
 	};
 	use lightning::util::ser::Writeable;
-	use lightning::util::sweep::{OutputSpendStatus, OutputSweeper};
+	use lightning::util::sweep::{OutputSpendStatus, OutputSweeper, PRUNE_DELAY_BLOCKS};
 	use lightning::util::test_utils;
 	use lightning::{get_event, get_event_msg};
 	use lightning_persister::fs_store::FilesystemStore;
@@ -2282,8 +2282,8 @@ mod tests {
 		}
 
 		// Check we stop tracking the spendable outputs when one of the txs reaches
-		// ANTI_REORG_DELAY confirmations.
-		confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY);
+		// PRUNE_DELAY_BLOCKS confirmations.
+		confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, PRUNE_DELAY_BLOCKS);
 		assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0);
 
 		if !std::thread::panicking() {
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 3f6bdc3f256..2d5667a15bd 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -256,6 +256,10 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
 // solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not
 // keep bumping another claim tx to solve the outpoint.
 pub const ANTI_REORG_DELAY: u32 = 6;
+/// Number of blocks we wait before assuming a [`ChannelMonitor`] to be fully resolved and
+/// considering it to be safely archived.
+// 4032 blocks are roughly four weeks
+pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
 /// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
 /// refuse to accept a new HTLC.
 ///
@@ -2015,10 +2019,11 @@ impl ChannelMonitor {
 	///
 	/// This function returns a tuple of two booleans, the first indicating whether the monitor is
 	/// fully resolved, and the second whether the monitor needs persistence to ensure it is
-	/// reliably marked as resolved within 4032 blocks.
+	/// reliably marked as resolved within [`ARCHIVAL_DELAY_BLOCKS`] blocks.
 	///
-	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at least
-	/// 4032 blocks as an additional protection against any bugs resulting in spuriously empty balance sets.
+	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at
+	/// least [`ARCHIVAL_DELAY_BLOCKS`] blocks as an additional protection against any bugs
+	/// resulting in spuriously empty balance sets.
 	pub fn check_and_update_full_resolution_status(&self, logger: &L) -> (bool, bool) {
 		let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
 		let current_height = self.current_best_block().height;
@@ -2034,11 +2039,10 @@ impl ChannelMonitor {
 		// once processed, implies the preimage exists in the corresponding inbound channel.
 		let preimages_not_needed_elsewhere = inner.pending_monitor_events.is_empty();
 
-		const BLOCKS_THRESHOLD: u32 = 4032; // ~four weeks
 		match (inner.balances_empty_height, is_all_funds_claimed, preimages_not_needed_elsewhere) {
 			(Some(balances_empty_height), true, true) => {
 				// Claimed all funds, check if reached the blocks threshold.
-				(current_height >= balances_empty_height + BLOCKS_THRESHOLD, false)
+				(current_height >= balances_empty_height + ARCHIVAL_DELAY_BLOCKS, false)
 			},
 			(Some(_), false, _)|(Some(_), _, false) => {
 				// previously assumed we claimed all funds, but we have new funds to claim or
@@ -2058,7 +2062,7 @@ impl ChannelMonitor {
 				// None. It is set to the current block height.
 				log_debug!(logger,
 					"ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks",
-					inner.get_funding_txo().0, BLOCKS_THRESHOLD);
+					inner.get_funding_txo().0, ARCHIVAL_DELAY_BLOCKS);
 				inner.balances_empty_height = Some(current_height);
 				(false, true)
 			},
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 9556e988b4e..e2c76643348 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -10,7 +10,7 @@
 //! Further functional tests which test blockchain reorganizations.
 
 use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep};
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep};
 use crate::chain::transaction::OutPoint;
 use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
 use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
@@ -246,19 +246,19 @@ fn archive_fully_resolved_monitors() {
 
 	// At this point, both nodes have no more `Balance`s, but nodes[0]'s `ChannelMonitor` still
 	// hasn't had the `MonitorEvent` that contains the preimage claimed by the `ChannelManager`.
-	// Thus, calling `archive_fully_resolved_channel_monitors` and waiting 4032 blocks will not
-	// result in the `ChannelMonitor` being archived.
+	// Thus, calling `archive_fully_resolved_channel_monitors` and waiting `ARCHIVAL_DELAY_BLOCKS`
+	// blocks will not result in the `ChannelMonitor` being archived.
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
-	connect_blocks(&nodes[0], 4032);
+	connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
 
-	// ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly 4032
-	// blocks.
+	// ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly
+	// `ARCHIVAL_DELAY_BLOCKS` blocks.
 	nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1);
-	connect_blocks(&nodes[1], 4031);
+	connect_blocks(&nodes[1], ARCHIVAL_DELAY_BLOCKS - 1);
 	nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1);
 	connect_blocks(&nodes[1], 1);
@@ -266,11 +266,11 @@ fn archive_fully_resolved_monitors() {
 	assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 0);
 
 	// Finally, we process the pending `MonitorEvent` from nodes[0], allowing the `ChannelMonitor`
-	// to be archived 4032 blocks later.
+	// to be archived `ARCHIVAL_DELAY_BLOCKS` blocks later.
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
-	connect_blocks(&nodes[0], 4031);
+	connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS - 1);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
 	connect_blocks(&nodes[0], 1);
diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 2be0cb39f4f..7aa41531ce2 100644
--- a/lightning/src/sign/mod.rs
+++ b/lightning/src/sign/mod.rs
@@ -538,6 +538,15 @@ impl SpendableOutputDescriptor {
 		};
 		Ok((psbt, expected_max_weight))
 	}
+
+	/// Returns the outpoint of the spendable output.
+	pub fn outpoint(&self) -> OutPoint {
+		match self {
+			Self::StaticOutput { outpoint, .. } => *outpoint,
+			Self::StaticPaymentOutput(descriptor) => descriptor.outpoint,
+			Self::DelayedPaymentOutput(descriptor) => descriptor.outpoint,
+		}
+	}
 }
 
 /// The parameters required to derive a channel signer via [`SignerProvider`].
diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs
index b61306194df..0022e5286d2 100644
--- a/lightning/src/util/sweep.rs
+++ b/lightning/src/util/sweep.rs
@@ -9,7 +9,7 @@
 //! sweeping them.
 
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use crate::chain::channelmonitor::ANTI_REORG_DELAY;
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS};
 use crate::chain::{self, BestBlock, Confirm, Filter, Listen, WatchedOutput};
 use crate::io;
 use crate::ln::msgs::DecodeError;
@@ -32,6 +32,9 @@ use bitcoin::{BlockHash, Transaction, Txid};
 
 use core::ops::Deref;
 
+/// The number of blocks we wait before we prune the tracked spendable outputs.
+pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY;
+
 /// The state of a spendable output currently tracked by an [`OutputSweeper`].
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TrackedSpendableOutput {
@@ -71,13 +74,7 @@ impl TrackedSpendableOutput {
 
 	/// Returns whether the output is spent in the given transaction.
 	pub fn is_spent_in(&self, tx: &Transaction) -> bool {
-		let prev_outpoint = match &self.descriptor {
-			SpendableOutputDescriptor::StaticOutput { outpoint, .. } => *outpoint,
-			SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.outpoint,
-			SpendableOutputDescriptor::StaticPaymentOutput(output) => output.outpoint,
-		}
-		.into_bitcoin_outpoint();
-
+		let prev_outpoint = self.descriptor.outpoint().into_bitcoin_outpoint();
 		tx.input.iter().any(|input| input.previous_output == prev_outpoint)
 	}
 }
@@ -107,7 +104,11 @@ pub enum OutputSpendStatus {
 		latest_spending_tx: Transaction,
 	},
 	/// A transaction spending the output has been confirmed on-chain but will be tracked until it
-	/// reaches [`ANTI_REORG_DELAY`] confirmations.
+	/// reaches at least [`PRUNE_DELAY_BLOCKS`] confirmations to ensure [`Event::SpendableOutputs`]
+	/// stemming from lingering [`ChannelMonitor`]s can safely be replayed.
+	///
+	/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs
+	/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
 	PendingThresholdConfirmations {
 		/// The hash of the chain tip when we first broadcast a transaction spending this output.
 		first_broadcast_hash: BlockHash,
@@ -530,7 +531,9 @@ where
 		// Prune all outputs that have sufficient depth by now.
 		sweeper_state.outputs.retain(|o| {
 			if let Some(confirmation_height) = o.status.confirmation_height() {
-				if cur_height >= confirmation_height + ANTI_REORG_DELAY - 1 {
+				// We wait at least `PRUNE_DELAY_BLOCKS`, as `Event::SpendableOutputs` from
+				// lingering monitors might still be replayed before then.
+				if cur_height >= confirmation_height + PRUNE_DELAY_BLOCKS - 1 {
 					log_debug!(self.logger,
 						"Pruning swept output as sufficiently confirmed via spend in transaction {:?}. Pruned descriptor: {:?}",
 						o.status.latest_spending_tx().map(|t| t.compute_txid()), o.descriptor