Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix potential peer_state deadlocks in finish_force_close_channel #2597

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 34 additions & 12 deletions lightning/src/ln/channelmanager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2626,8 +2626,13 @@ where
self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}

#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}

let (monitor_update_option, mut failed_htlcs) = shutdown_res;
log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
Expand All @@ -2653,8 +2658,7 @@ where
let peer_state_mutex = per_peer_state.get(peer_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
let (update_opt, counterparty_node_id) = {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let mut peer_state = peer_state_mutex.lock().unwrap();
let closure_reason = if let Some(peer_msg) = peer_msg {
ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
} else {
Expand All @@ -2664,6 +2668,8 @@ where
log_error!(self.logger, "Force-closing channel {}", channel_id);
self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
mem::drop(peer_state);
mem::drop(per_peer_state);
match chan_phase {
ChannelPhase::Funded(mut chan) => {
self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
Expand All @@ -2686,10 +2692,17 @@ where
}
};
if let Some(update) = update_opt {
let mut peer_state = peer_state_mutex.lock().unwrap();
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
// Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
// that peer is gone, try to broadcast it via whatever peer we have.
let per_peer_state = self.per_peer_state.read().unwrap();
let a_peer_state_opt = per_peer_state.get(peer_node_id)
.ok_or(per_peer_state.values().next());
if let Ok(a_peer_state_mutex) = a_peer_state_opt {
let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
}

Ok(counterparty_node_id)
Expand Down Expand Up @@ -4627,8 +4640,9 @@ where
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
let mut pending_peers_awaiting_removal = Vec::new();
let mut shutdown_channels = Vec::new();

let process_unfunded_channel_tick = |
let mut process_unfunded_channel_tick = |
chan_id: &ChannelId,
context: &mut ChannelContext<SP>,
unfunded_context: &mut UnfundedChannelContext,
Expand All @@ -4641,7 +4655,7 @@ where
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
self.finish_force_close_channel(context.force_shutdown(false));
shutdown_channels.push(context.force_shutdown(false));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
Expand Down Expand Up @@ -4834,6 +4848,10 @@ where
let _ = handle_error!(self, err, counterparty_node_id);
}

for shutdown_res in shutdown_channels {
self.finish_force_close_channel(shutdown_res);
}

self.pending_outbound_payments.remove_stale_payments(&self.pending_events);

// Technically we don't need to do this here, but if we have holding cell entries in a
Expand Down Expand Up @@ -4990,6 +5008,7 @@ where
// This ensures that future code doesn't introduce a lock-order requirement for
// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
// this function with any `per_peer_state` peer lock acquired would.
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
Expand Down Expand Up @@ -5997,7 +6016,8 @@ where
}

fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
let mut finish_shutdown = None;
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
Expand Down Expand Up @@ -6042,8 +6062,7 @@ where
log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
return Ok(());
finish_shutdown = Some(chan.context_mut().force_shutdown(false));
},
}
} else {
Expand All @@ -6055,6 +6074,9 @@ where
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
if let Some(shutdown_res) = finish_shutdown {
self.finish_force_close_channel(shutdown_res);
}

Ok(())
}
Expand Down
22 changes: 22 additions & 0 deletions lightning/src/ln/shutdown_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,28 @@ fn test_lnd_bug_6039() {
assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn shutdown_on_unfunded_channel() {
// Test receiving a shutdown prior to funding generation
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 100_000, 0, None).unwrap();
let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

// P2WPKH
let script = Builder::new().push_int(0)
.push_slice(&[0; 20])
.into_script();
Comment on lines +267 to +270
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: P2WPKH*

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🤦 Will fix in #2591


nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &msgs::Shutdown {
channel_id: open_chan.temporary_channel_id, scriptpubkey: script,
});
check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyCoopClosedUnfundedChannel, [nodes[1].node.get_our_node_id()], 1_000_000);
}

#[test]
fn expect_channel_shutdown_state_with_force_closure() {
// Test sending a shutdown prior to channel_ready after funding generation
Expand Down