Work around LND bug 6039 #2507

Merged · 2 commits · Aug 22, 2023
Changes from 1 commit
19 changes: 12 additions & 7 deletions lightning/src/ln/channel.rs
@@ -3811,6 +3811,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
		}
	}

	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
	pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
		if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None }
	}

	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
@@ -3877,13 +3888,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
		self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
		self.context.sent_message_awaiting_response = None;

		let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };
		let shutdown_msg = self.get_outbound_shutdown();

		let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);

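The helper extracted above ends up with two callers: the `channel_reestablish` path in this file (previous hunk) and the new LND-error path in `channelmanager.rs` (next file). A minimal sketch of that call pattern, where `channel`, `counterparty_node_id` and `pending_msg_events` are illustrative stand-ins for the real `ChannelManager` state rather than anything in this diff:

	// Illustrative sketch only. get_outbound_shutdown() returns Some only once
	// LocalShutdownSent is set, building the Shutdown from get_closing_scriptpubkey().
	if let Some(shutdown_msg) = channel.get_outbound_shutdown() {
		pending_msg_events.push(events::MessageSendEvent::SendShutdown {
			node_id: counterparty_node_id,
			msg: shutdown_msg,
		});
	}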
30 changes: 30 additions & 0 deletions lightning/src/ln/channelmanager.rs
@@ -7489,6 +7489,36 @@ where
	fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		match &msg.data as &str {
			"cannot co-op close channel w/ active htlcs"|
			"link failed to shutdown" =>
			{
				// LND hasn't properly handled shutdown messages ever, and force-closes any time we
				// send one while HTLCs are still present. The issue is tracked at
				// https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches
				// to fix it but none so far have managed to land upstream. The issue appears to be
				// very low priority for the LND team despite being marked "P1".
				// We're not going to bother handling this in a sensible way, instead simply
				// re-sending the Shutdown message on repeat until morale improves.
				if msg.channel_id != [0; 32] {
					let per_peer_state = self.per_peer_state.read().unwrap();
					let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
					if peer_state_mutex_opt.is_none() { return; }
					let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
					if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) {
						if let Some(msg) = chan.get_outbound_shutdown() {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
								node_id: *counterparty_node_id,
								msg,
							});
						}
					}
				}
				return;
			}
			_ => {}
		}

		if msg.channel_id == [0; 32] {
			let channel_ids: Vec<[u8; 32]> = {
				let per_peer_state = self.per_peer_state.read().unwrap();
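Behaviorally, the new match arm turns one of LND's known error strings into another queued `SendShutdown` for the offending channel instead of a force-close. The test added to `shutdown_tests.rs` below exercises exactly this path; stripped down, the trigger looks roughly like the following sketch, reusing the functional-test harness calls visible in that test (with `chan.2` as the channel id):

	// An LND-style error naming a specific channel...
	let err_msg = msgs::ErrorMessage {
		channel_id: chan.2,
		data: "link failed to shutdown".to_string(), // one of the two matched strings
	};
	// ...which handle_error now answers by re-queuing our Shutdown rather than closing.
	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &err_msg);
	let _repeated_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());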
49 changes: 49 additions & 0 deletions lightning/src/ln/shutdown_tests.rs
@@ -198,6 +198,55 @@ fn expect_channel_shutdown_state_with_htlc() {
	assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn test_lnd_bug_6039() {
	// LND sends a nonsense error message any time it gets a shutdown if there are still HTLCs
	// pending. We currently swallow that error to work around LND's bug #6039. This test emulates
	// the LND nonsense and ensures we at least kinda handle it.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

	nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);

	// Generate an lnd-like error message and check that we respond by simply screaming louder to
	// see if LND will accept our protocol compliance.
	let err_msg = msgs::ErrorMessage { channel_id: chan.2, data: "link failed to shutdown".to_string() };
	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &err_msg);
	let _node_0_repeated_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());

	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());

	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);

	// Assume that LND will eventually respond to our Shutdown if we clear all the remaining HTLCs
	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);

	// ClosingSigned negotiation process
	let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
	let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
	let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
	let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
	assert!(node_1_none.is_none());
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);

	// Shutdown basically removes the ChannelDetails, so testing the ShutdownComplete state is unnecessary
	assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn expect_channel_shutdown_state_with_force_closure() {
	// Test sending a shutdown prior to channel_ready after funding generation