Implement non-strict forwarding #3127

Merged: 1 commit, Jun 20, 2024
12 changes: 6 additions & 6 deletions fuzz/src/full_stack.rs
@@ -1366,8 +1366,8 @@ mod tests {

// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)

// we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
@@ -1478,8 +1478,8 @@ mod tests {
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);

- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);

// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
// we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
@@ -1602,8 +1602,8 @@ mod tests {

// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)

// connect a block with one transaction of len 125
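A note on the expectation change above: non-strict forwarding has the forwarding path call `get_available_balances(&self.fee_estimator)` on candidate channels while choosing where to forward (see the channelmanager.rs diff below), and each such balance check consults the fee estimator for the dust-exposure limit, which is consistent with the expected count of `00fd` feerate responses going from two to three at each of these points. The following toy sketch is not the fuzz harness's real `FuzzEstimator`; it only illustrates the counting argument, and `CountingEstimator` and `check_dust_exposure` are made-up names.

```rust
// Toy sketch, not the fuzz harness's FuzzEstimator: a fee estimator that counts
// lookups, illustrating why one extra dust-exposure/balance evaluation per
// forward means one extra "00fd" feerate response in the fuzz input.
use std::cell::Cell;

struct CountingEstimator {
    lookups: Cell<usize>,
}

impl CountingEstimator {
    fn est_sat_per_1000_weight(&self) -> u32 {
        self.lookups.set(self.lookups.get() + 1);
        253 // the returned feerate is irrelevant to the counting argument
    }
}

// Stand-in for a balance/dust-exposure check that hits the estimator once.
fn check_dust_exposure(est: &CountingEstimator) {
    let _feerate = est.est_sat_per_1000_weight();
}

fn main() {
    let est = CountingEstimator { lookups: Cell::new(0) };
    // Previously two checks ran while forwarding the HTLC (two "00fd" responses);
    // evaluating one additional candidate channel adds a third.
    for _ in 0..3 {
        check_dust_exposure(&est);
    }
    assert_eq!(est.lookups.get(), 3);
}
```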
190 changes: 121 additions & 69 deletions lightning/src/ln/channelmanager.rs
@@ -4895,8 +4895,8 @@ where
if short_chan_id != 0 {
let mut forwarding_counterparty = None;
macro_rules! forwarding_channel_not_found {
- () => {
- for forward_info in pending_forwards.drain(..) {
+ ($forward_infos: expr) => {
+ for forward_info in $forward_infos {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
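The hunk above changes `forwarding_channel_not_found!` from a zero-argument macro that always drained `pending_forwards` into one that takes the iterator of forwards to fail as an `expr` argument. Later call sites can then pass either the full `pending_forwards.drain(..)` or, when a channel disappears mid-iteration, the current item chained in front of the remaining drain. A minimal stand-alone sketch of the same pattern, with hypothetical names (`fail_all!` and plain integers in place of `forwarding_channel_not_found!` and `HTLCForwardInfo`):

```rust
// Stand-alone sketch of the macro-argument pattern above; fail_all! and the
// integer "forwards" are hypothetical stand-ins, not LDK code.
macro_rules! fail_all {
    ($forward_infos: expr) => {
        for forward_info in $forward_infos {
            println!("failing forward {}", forward_info);
        }
    };
}

fn main() {
    // Old-style call site: fail everything by draining the pending list.
    let mut pending = vec![1, 2, 3];
    fail_all!(pending.drain(..));

    // New-style call site: mid-iteration, put the current item back in front of
    // the remaining drain so it is failed along with everything left.
    let mut pending = vec![4, 5, 6];
    let mut draining = pending.drain(..);
    let current = draining.next().unwrap();
    fail_all!(core::iter::once(current).chain(draining));
}
```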
@@ -5004,104 +5004,156 @@ where
let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
};
forwarding_counterparty = Some(counterparty_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
for forward_info in pending_forwards.drain(..) {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
prev_user_channel_id, forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
routing: PendingHTLCRouting::Forward {
onion_packet, blinded, ..
}, skimmed_fee_msat, ..
let mut draining_pending_forwards = pending_forwards.drain(..);
while let Some(forward_info) = draining_pending_forwards.next() {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
prev_user_channel_id, forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
routing: PendingHTLCRouting::Forward {
ref onion_packet, blinded, ..
}, skimmed_fee_msat, ..
},
}) => {
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
// Phantom payments are only PendingHTLCRouting::Receive.
phantom_shared_secret: None,
blinded_failure: blinded.map(|b| b.failure),
});
let next_blinding_point = blinded.and_then(|b| {
let encrypted_tlvs_ss = self.node_signer.ecdh(
Recipient::Node, &b.inbound_blinding_point, None
).unwrap().secret_bytes();
onion_utils::next_hop_pubkey(
&self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
).ok()
});

// Forward the HTLC over the most appropriate channel with the corresponding peer,
// applying non-strict forwarding.
// The channel with the least amount of outbound liquidity will be used to maximize the
// probability of being able to successfully forward a subsequent HTLC.
let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
ChannelPhase::Funded(chan) => {
let balances = chan.context.get_available_balances(&self.fee_estimator);
if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
chan.context.is_usable() {
Some((chan, balances))
} else {
None
}
},
}) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash));
log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
// Phantom payments are only PendingHTLCRouting::Receive.
phantom_shared_secret: None,
blinded_failure: blinded.map(|b| b.failure),
});
let next_blinding_point = blinded.and_then(|b| {
let encrypted_tlvs_ss = self.node_signer.ecdh(
Recipient::Node, &b.inbound_blinding_point, None
).unwrap().secret_bytes();
onion_utils::next_hop_pubkey(
&self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
).ok()
});
if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
payment_hash, outgoing_cltv_value, htlc_source.clone(),
onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
&&logger)
{
if let ChannelError::Ignore(msg) = e {
log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
_ => None,
}).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
let optimal_channel = match maybe_optimal_channel {
Some(chan) => chan,
None => {
// Fall back to the specified channel to return an appropriate error.
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
chan
} else {
panic!("Stated return value requirements in send_htlc() were not met");
forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
break;
}
}
};

let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
"specified"
} else {
"alternate"
};
log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
payment_hash, outgoing_cltv_value, htlc_source.clone(),
onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
&&logger)
{
if let ChannelError::Ignore(msg) = e {
log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
} else {
panic!("Stated return value requirements in send_htlc() were not met");
}

if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
));
continue;
} else {
forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
break;
}
None
},
HTLCForwardInfo::AddHTLC { .. } => {
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
}
None
},
HTLCForwardInfo::AddHTLC { .. } => {
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
},
HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
} else {
forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
break;
}
},
HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
let res = chan.queue_fail_malformed_htlc(
htlc_id, failure_code, sha256_of_onion, &&logger
);
Some((res, htlc_id))
},
};
if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
if let Err(e) = queue_fail_htlc_res {
if let ChannelError::Ignore(msg) = e {
} else {
forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
break;
}
},
};
if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
if let Err(e) = queue_fail_htlc_res {
if let ChannelError::Ignore(msg) = e {
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
} else {
panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
// fail-backs are best-effort, we probably already have one
// pending, and if not that's OK, if not, the channel is on
// the chain and sending the HTLC-Timeout is their problem.
continue;
} else {
panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
// fail-backs are best-effort, we probably already have one
// pending, and if not that's OK, if not, the channel is on
// the chain and sending the HTLC-Timeout is their problem.
continue;
}
}
} else {
forwarding_channel_not_found!();
continue;
}
} else {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
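The core of the change is the channel selection in the `HTLCForwardInfo::AddHTLC` arm above: instead of forwarding strictly over the channel named by the onion's short channel ID, the node considers every usable funded channel with the same counterparty whose `next_outbound_htlc_minimum_msat`/`next_outbound_htlc_limit_msat` bounds admit the amount, and picks the one with the least outbound liquidity so that larger channels stay available for subsequent HTLCs. If no candidate qualifies, it falls back to the specified channel so the resulting error is appropriate, and if that channel is gone it fails the current and remaining forwards via `forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards))`. The sketch below restates the selection rule outside of LDK, using simplified stand-in types; `CandidateChannel`, `Balances` and `select_forwarding_channel` are hypothetical, not LDK APIs.

```rust
// Minimal sketch of the selection rule, not LDK's API: CandidateChannel,
// Balances and select_forwarding_channel are simplified stand-ins for
// ChannelPhase::Funded, ChannelContext::get_available_balances() and the
// filter_map/min_by_key chain in the diff.
#[derive(Debug)]
struct Balances {
    next_outbound_htlc_limit_msat: u64,
    next_outbound_htlc_minimum_msat: u64,
}

#[derive(Debug)]
struct CandidateChannel {
    short_channel_id: u64,
    usable: bool,
    balances: Balances,
}

/// Among the peer's usable channels that can carry `outgoing_amt_msat`, pick the
/// one with the least outbound liquidity so larger channels stay free for
/// subsequent HTLCs; return None if no channel qualifies (the real code then
/// falls back to the specified channel to produce an accurate error).
fn select_forwarding_channel(
    channels: &mut [CandidateChannel],
    outgoing_amt_msat: u64,
) -> Option<&mut CandidateChannel> {
    channels
        .iter_mut()
        .filter(|chan| {
            chan.usable
                && outgoing_amt_msat <= chan.balances.next_outbound_htlc_limit_msat
                && outgoing_amt_msat >= chan.balances.next_outbound_htlc_minimum_msat
        })
        .min_by_key(|chan| chan.balances.next_outbound_htlc_limit_msat)
}

fn main() {
    let mut channels = vec![
        CandidateChannel {
            short_channel_id: 1,
            usable: true,
            balances: Balances { next_outbound_htlc_limit_msat: 5_000_000, next_outbound_htlc_minimum_msat: 1 },
        },
        CandidateChannel {
            short_channel_id: 2,
            usable: true,
            balances: Balances { next_outbound_htlc_limit_msat: 200_000, next_outbound_htlc_minimum_msat: 1 },
        },
        CandidateChannel {
            short_channel_id: 3,
            usable: false,
            balances: Balances { next_outbound_htlc_limit_msat: 9_000_000, next_outbound_htlc_minimum_msat: 1 },
        },
    ];
    // A 100_000 msat HTLC fits both usable channels; the one with the smaller
    // outbound limit (scid 2) is chosen.
    let chosen = select_forwarding_channel(&mut channels, 100_000).unwrap();
    assert_eq!(chosen.short_channel_id, 2);
    println!("forwarding over scid {}", chosen.short_channel_id);
}
```

The `min_by_key` over `next_outbound_htlc_limit_msat` is what makes the forwarding non-strict yet liquidity-conserving: any qualifying channel to the same peer may carry the HTLC, but the smallest sufficient one is preferred.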