Queue BackgroundEvent to force close channels upon ChannelManager::read #2059

Merged
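In outline, this PR changes `ChannelManager::read` to stop broadcasting holder commitment transactions directly during deserialization; instead it queues a `BackgroundEvent::ClosingMonitorUpdate` carrying a `ChannelMonitorUpdate` whose `update_id` is the `CLOSED_CHANNEL_UPDATE_ID` sentinel, to be applied once the monitors can process updates. The sketch below is a simplified, self-contained model of that flow — every type here is a pared-down stand-in for the real LDK type of the same name, and `queue_force_close` is an illustrative helper, not LDK API:

```rust
/// Sentinel update ID used for force-close and post-force-close updates.
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

#[derive(Clone, PartialEq, Eq)]
struct OutPoint { txid: [u8; 32], index: u16 }

#[derive(Clone, PartialEq, Eq)]
enum ChannelMonitorUpdateStep { ChannelForceClosed { should_broadcast: bool } }

#[derive(Clone, PartialEq, Eq)]
struct ChannelMonitorUpdate { update_id: u64, updates: Vec<ChannelMonitorUpdateStep> }

enum BackgroundEvent { ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)) }

/// Simplified read path: queue a force-close update for a stale channel
/// instead of broadcasting its commitment transaction on the spot.
fn queue_force_close(pending: &mut Vec<BackgroundEvent>, funding_txo: OutPoint) {
    let update = ChannelMonitorUpdate {
        update_id: CLOSED_CHANNEL_UPDATE_ID,
        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
    };
    // Skip exact duplicates, mirroring the dedup check this PR adds to the
    // read path (possible because updates now derive PartialEq).
    let dup = pending.iter().any(|e| {
        let BackgroundEvent::ClosingMonitorUpdate((txo, upd)) = e;
        *txo == funding_txo && *upd == update
    });
    if !dup {
        pending.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
    }
}

fn main() {
    let mut pending = Vec::new();
    let txo = OutPoint { txid: [0; 32], index: 0 };
    queue_force_close(&mut pending, txo.clone());
    queue_force_close(&mut pending, txo); // duplicate: ignored
    assert_eq!(pending.len(), 1);
}
```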
3 changes: 2 additions & 1 deletion lightning-persister/src/lib.rs
@@ -141,6 +141,7 @@ mod tests {
use bitcoin::{Txid, TxMerkleNode};
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::Persist;
+use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
use lightning::chain::transaction::OutPoint;
use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
use lightning::ln::functional_test_utils::*;
@@ -253,7 +254,7 @@ mod tests {
check_added_monitors!(nodes[1], 1);

// Make sure everything is persisted as expected after close.
-check_persisted_data!(11);
+check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
}

// Test that if the persister's path to channel data is read-only, writing a
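The hardcoded `11` above was the sequential update count the test used to expect; after this change the final persisted update carries the sentinel instead. For reference, the relationship the updated assertion relies on (using the real constant imported above) is simply:

```rust
use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;

fn main() {
    // The sentinel equals u64::MAX, so the last persisted update_id after a
    // force close is no longer `previous_id + 1` but the maximum u64 value.
    assert_eq!(CLOSED_CHANNEL_UPDATE_ID, u64::MAX);
}
```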
67 changes: 36 additions & 31 deletions lightning/src/chain/channelmonitor.rs
@@ -69,34 +69,36 @@ use crate::sync::{Mutex, LockTestExt};
/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
/// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
/// The sequence number of this update. Updates *must* be replayed in-order according to this
/// sequence number (and updates may panic if they are not). The update_id values are strictly
-/// increasing and increase by one for each new update, with one exception specified below.
+/// increasing and increase by one for each new update, with two exceptions specified below.
///
/// This sequence number is also used to track up to which points updates which returned
/// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
///
-/// The only instance where update_id values are not strictly increasing is the case where we
-/// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
-/// its docs for more details.
+/// The only instances we allow where update_id values are not strictly increasing have a
+/// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. This update ID is used for updates that
+/// will force close the channel by broadcasting the latest commitment transaction or
+/// special post-force-close updates, like providing preimages necessary to claim outputs on the
+/// broadcast commitment transaction. See its docs for more details.
///
/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
}

-/// If:
-/// (1) a channel has been force closed and
-/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
-/// this channel's (the backward link's) broadcasted commitment transaction
-/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
-/// with the update providing said payment preimage. No other update types are allowed after
-/// force-close.
+/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
+///
+/// (1) attempting to force close the channel by broadcasting our latest commitment transaction or
+/// (2) providing a preimage (after the channel has been force closed) from a forward link that
+/// allows us to spend an HTLC output on this channel's (the backward link's) broadcasted
+/// commitment transaction.
+///
+/// No other [`ChannelMonitorUpdate`]s are allowed after force-close.
pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;

impl Writeable for ChannelMonitorUpdate {
@@ -488,8 +490,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,

);

-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,
@@ -1201,17 +1202,6 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
}

-pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(
-&self,
-broadcaster: &B,
-logger: &L,
-) where
-B::Target: BroadcasterInterface,
-L::Target: Logger,
-{
-self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger);
-}

/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
/// itself.
///
@@ -2265,14 +2255,22 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
{
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
-// ChannelMonitor updates may be applied after force close if we receive a
-// preimage for a broadcasted commitment transaction HTLC output that we'd
-// like to claim on-chain. If this is the case, we no longer have guaranteed
-// access to the monitor's update ID, so we use a sentinel value instead.
+// ChannelMonitor updates may be applied after force close if we receive a preimage for a
+// broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+// is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+// sentinel value instead.
+//
+// The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+// thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+// them as well.
if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
-ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
+// We should have already seen a `ChannelForceClosed` update if we're trying to
+// provide a preimage at this point.
+ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
+debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID),
_ => {
log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
@@ -2364,6 +2362,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
},
}
}

+// If the updates succeeded and we were in an already closed channel state, then there's no
+// need to refuse any updates we expect to receive after seeing a confirmed commitment.
+if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id {
+return Ok(());
+}

[Review comment] The only case where we might have a second CLOSED_CHANNEL_UPDATE_ID update arises from post-force-close preimage forwarding (i.e., ChannelMonitorUpdateStep::PaymentPreimage)? This could be verified.

self.latest_update_id = updates.update_id;

if ret.is_ok() && self.funding_spend_seen {
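Condensed, the sentinel-update rule that `update_monitor` now enforces reads roughly as below. This is a paraphrase with simplified stand-in types, not the LDK implementation (which panics/debug-asserts rather than returning a `Result`): at `CLOSED_CHANNEL_UPDATE_ID`, only a possibly-redundant `ChannelForceClosed` or a `PaymentPreimage` that arrives after the force-close update are acceptable.

```rust
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

enum Step {
    ChannelForceClosed { should_broadcast: bool },
    PaymentPreimage { preimage: [u8; 32] },
    LatestHolderCommitmentTXInfo, // stand-in for every other update type
}

/// Returns Ok(()) if a sentinel-ID update may be applied to a monitor whose
/// latest applied update ID is `latest_update_id`.
fn validate_post_close_step(latest_update_id: u64, step: &Step) -> Result<(), &'static str> {
    match step {
        // The ChannelManager may queue a redundant force-close update if it
        // still believes the commitment needs broadcasting; tolerate it.
        Step::ChannelForceClosed { .. } => Ok(()),
        // A preimage is only valid once the force-close update has been seen,
        // i.e. the monitor already sits at the sentinel ID.
        Step::PaymentPreimage { .. } if latest_update_id == CLOSED_CHANNEL_UPDATE_ID => Ok(()),
        Step::PaymentPreimage { .. } => Err("preimage before force-close update"),
        _ => Err("disallowed post-force-close update"),
    }
}

fn main() {
    assert!(validate_post_close_step(42, &Step::ChannelForceClosed { should_broadcast: true }).is_ok());
    assert!(validate_post_close_step(CLOSED_CHANNEL_UPDATE_ID, &Step::PaymentPreimage { preimage: [0; 32] }).is_ok());
    assert!(validate_post_close_step(42, &Step::LatestHolderCommitmentTXInfo).is_err());
}
```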
4 changes: 2 additions & 2 deletions lightning/src/ln/channel.rs
@@ -33,7 +33,7 @@ use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::routing::gossip::NodeId;
@@ -6081,7 +6081,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
-self.latest_monitor_update_id += 1;
+self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
Some((funding_txo, ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
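The one-line change above is load-bearing: by pinning `latest_monitor_update_id` to the sentinel instead of incrementing it, a `ChannelForceClosed` update generated on a later restart is identical to the first one, which is what lets the read path and `update_monitor` treat replays as no-ops. A minimal before/after illustration (simplified; in LDK the field lives on `Channel` and the update is built inside `force_shutdown`):

```rust
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

struct Channel { latest_monitor_update_id: u64 }

impl Channel {
    // Old behavior: every force close produced a fresh, distinct update ID.
    fn force_close_update_id_old(&mut self) -> u64 {
        self.latest_monitor_update_id += 1;
        self.latest_monitor_update_id
    }

    // New behavior: force closes always use the sentinel, so two force-close
    // updates for the same channel compare equal and can be deduplicated.
    fn force_close_update_id_new(&mut self) -> u64 {
        self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
        self.latest_monitor_update_id
    }
}

fn main() {
    let mut chan = Channel { latest_monitor_update_id: 10 };
    assert_ne!(chan.force_close_update_id_old(), chan.force_close_update_id_old());
    assert_eq!(chan.force_close_update_id_new(), chan.force_close_update_id_new());
}
```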
29 changes: 21 additions & 8 deletions lightning/src/ln/channelmanager.rs
@@ -7310,6 +7310,7 @@ where
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = Vec::new();
+let mut pending_background_events = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -7339,9 +7340,11 @@ where
log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
-let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+if let Some(monitor_update) = monitor_update {
+pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+}
failed_htlcs.append(&mut new_failed_htlcs);
-monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
channel_closures.push(events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
@@ -7406,10 +7409,13 @@ where
}
}

-for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+for (funding_txo, _) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
-log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
-monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+let monitor_update = ChannelMonitorUpdate {
+update_id: CLOSED_CHANNEL_UPDATE_ID,
+updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+};
+pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
}
}

@@ -7462,10 +7468,17 @@ where
}

let background_event_count: u64 = Readable::read(reader)?;
-let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
-0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+0 => {
+let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+if pending_background_events.iter().find(|e| {
+let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+}).is_none() {
+pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+}
+}
_ => return Err(DecodeError::InvalidValue),
}
}
@@ -7840,7 +7853,7 @@ where
per_peer_state: FairRwLock::new(per_peer_state),

pending_events: Mutex::new(pending_events_read),
-pending_background_events: Mutex::new(pending_background_events_read),
+pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),

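This diff only covers queuing and (de)serialization of the events; they are consumed elsewhere in `ChannelManager`, where each queued update is handed to the monitor layer. A self-contained sketch of that drain loop, with stand-in types and a caller-supplied `apply` callback in place of the real `chain::Watch::update_channel` call (the function name and shape here are illustrative, not LDK's API):

```rust
struct OutPoint { txid: [u8; 32], index: u16 }
struct ChannelMonitorUpdate { update_id: u64 }
enum BackgroundEvent { ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)) }

/// Drain queued background events, applying each monitor update. Because the
/// force-close updates all carry the CLOSED_CHANNEL_UPDATE_ID sentinel and the
/// monitor tolerates redundant ChannelForceClosed updates, replaying this
/// after an unclean shutdown is safe.
fn process_background_events<F>(pending: &mut Vec<BackgroundEvent>, mut apply: F)
where
    F: FnMut(OutPoint, ChannelMonitorUpdate),
{
    for event in pending.drain(..) {
        match event {
            BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) =>
                apply(funding_txo, update),
        }
    }
}

fn main() {
    let mut pending = vec![BackgroundEvent::ClosingMonitorUpdate((
        OutPoint { txid: [0; 32], index: 0 },
        ChannelMonitorUpdate { update_id: u64::MAX },
    ))];
    process_background_events(&mut pending, |txo, update| {
        // In LDK this is where the update reaches the ChannelMonitor, which
        // broadcasts the holder commitment transaction if requested.
        println!("applying update {} to channel at index {}", update.update_id, txo.index);
    });
    assert!(pending.is_empty());
}
```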
53 changes: 26 additions & 27 deletions lightning/src/ln/monitor_tests.rs
@@ -1859,15 +1859,18 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);

+// Serialize Bob with the initial state of both channels, which we'll use later.
+let bob_serialized = nodes[1].node.encode();

// Route two payments for each channel from Alice to Bob to lock in the HTLCs.
let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);

-// Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
-// point such that he broadcasts a revoked commitment transaction.
-let bob_serialized = nodes[1].node.encode();
+// Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
+// at this point such that he broadcasts a revoked commitment transaction with the HTLCs
+// present.
let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();

@@ -1897,30 +1900,26 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
}
}

-// Bob force closes by broadcasting his revoked state for each channel.
-nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
-check_added_monitors(&nodes[1], 1);
-check_closed_broadcast(&nodes[1], 1, true);
-check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-let revoked_commitment_a = {
-let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-assert_eq!(txn.len(), 1);
-let revoked_commitment = txn.pop().unwrap();
-assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-check_spends!(revoked_commitment, chan_a.3);
-revoked_commitment
-};
-nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
-check_added_monitors(&nodes[1], 1);
-check_closed_broadcast(&nodes[1], 1, true);
-check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-let revoked_commitment_b = {
-let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-assert_eq!(txn.len(), 1);
-let revoked_commitment = txn.pop().unwrap();
-assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-check_spends!(revoked_commitment, chan_b.3);
-revoked_commitment
+// Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
+// broadcast the latest commitment transaction known to them, which in our case is the one with
+// the HTLCs still pending.
+nodes[1].node.timer_tick_occurred();
+check_added_monitors(&nodes[1], 2);
+check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
+let (revoked_commitment_a, revoked_commitment_b) = {
+let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+assert_eq!(txn.len(), 2);
+assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+check_spends!(&txn[0], &chan_a.3);
+check_spends!(&txn[1], &chan_b.3);
+(txn[0].clone(), txn[1].clone())
+} else {
+check_spends!(&txn[1], &chan_a.3);
+check_spends!(&txn[0], &chan_b.3);
+(txn[1].clone(), txn[0].clone())
+}
};

// Bob should now receive two events to bump his revoked commitment transaction fees.
15 changes: 12 additions & 3 deletions lightning/src/ln/payment_tests.rs
@@ -334,9 +334,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
-let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-assert_eq!(as_broadcasted_txn.len(), 1);
-assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+nodes[0].node.timer_tick_occurred();
+if !confirm_before_reload {
+let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+assert_eq!(as_broadcasted_txn.len(), 1);
+assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+} else {
+assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+}
+check_added_monitors!(nodes[0], 1);

nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
@@ -499,9 +505,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+nodes[0].node.timer_tick_occurred();
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+check_added_monitors!(nodes[0], 1);

nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -2794,6 +2802,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
// the double-claim that would otherwise appear at the end of this test.
+nodes[0].node.timer_tick_occurred();
let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_broadcasted_txn.len(), 1);
