diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 39fa3a237a6..1fe3fcd9ff1 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -620,9 +620,8 @@ where C::Target: chain::Filter,
 	pub fn rebroadcast_pending_claims(&self) {
 		let monitors = self.monitors.read().unwrap();
 		for (_, monitor_holder) in &*monitors {
-			let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
 			monitor_holder.monitor.rebroadcast_pending_claims(
-				&*self.broadcaster, &*self.fee_estimator, &logger
+				&*self.broadcaster, &*self.fee_estimator, &self.logger
 			)
 		}
 	}
@@ -640,9 +639,8 @@ where
 	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
 		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
 		self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
-			let logger = WithChannelMonitor::from(&self.logger, &monitor);
 			monitor.block_connected(
-				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
+				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
 		});
 	}
@@ -650,9 +648,8 @@ where
 		let monitor_states = self.monitors.read().unwrap();
 		log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
 		for monitor_state in monitor_states.values() {
-			let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
 			monitor_state.monitor.block_disconnected(
-				header, height, &*self.broadcaster, &*self.fee_estimator, &logger);
+				header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger);
 		}
 	}
 }
@@ -669,9 +666,8 @@ where
 	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
 		log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
 		self.process_chain_data(header, None, txdata, |monitor, txdata| {
-			let logger = WithChannelMonitor::from(&self.logger, &monitor);
 			monitor.transactions_confirmed(
-				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
+				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
 		});
 	}
@@ -679,20 +675,19 @@
 		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
 		let monitor_states = self.monitors.read().unwrap();
 		for monitor_state in monitor_states.values() {
-			let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
-			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &logger);
+			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger);
 		}
 	}

 	fn best_block_updated(&self, header: &Header, height: u32) {
 		log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
 		self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
-			let logger = WithChannelMonitor::from(&self.logger, &monitor);
 			// While in practice there shouldn't be any recursive calls when given empty txdata,
 			// it's still possible if a chain::Filter implementation returns a transaction.
 			debug_assert!(txdata.is_empty());
 			monitor.best_block_updated(
-				header, height, &*self.broadcaster, &*self.fee_estimator, &logger)
+				header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
+			)
 		});
 	}
@@ -758,8 +753,7 @@ where C::Target: chain::Filter,
 	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
 		// Update the monitor that watches the channel referred to by the given outpoint.
-		let monitors_lock = self.monitors.read().unwrap();
-		let monitors = monitors_lock.deref();
+		let monitors = self.monitors.read().unwrap();
 		match monitors.get(&funding_txo) {
 			None => {
 				log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");
@@ -802,6 +796,7 @@ where C::Target: chain::Filter,
 			ChannelMonitorUpdateStatus::UnrecoverableError => {
 				// Take the monitors lock for writing so that we poison it and any future
 				// operations going forward fail immediately.
+				core::mem::drop(pending_monitor_updates);
 				core::mem::drop(monitors);
 				let _poison = self.monitors.write().unwrap();
 				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index e0f19f39d47..ce1ef9128f9 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1139,12 +1139,16 @@ impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> where L::Target: Logger
 	}
 }

-impl<'a, 'b, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
-	pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &'b ChannelMonitor<S>) -> Self {
+impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
+	pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &ChannelMonitor<S>) -> Self {
+		Self::from_impl(logger, &*monitor.inner.lock().unwrap())
+	}
+
+	pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
+		let peer_id = monitor_impl.counterparty_node_id;
+		let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
 		WithChannelMonitor {
-			logger,
-			peer_id: monitor.get_counterparty_node_id(),
-			channel_id: Some(monitor.get_funding_txo().0.to_channel_id()),
+			logger, peer_id, channel_id,
 		}
 	}
 }
@@ -1282,9 +1286,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 	)
 	where L::Target: Logger
 	{
-		self.inner.lock().unwrap().provide_initial_counterparty_commitment_tx(txid,
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.provide_initial_counterparty_commitment_tx(txid,
 			htlc_outputs, commitment_number, their_cur_per_commitment_point, feerate_per_kw,
-			to_broadcaster_value_sat, to_countersignatory_value_sat, logger);
+			to_broadcaster_value_sat, to_countersignatory_value_sat, &logger);
 	}

 	/// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction.
@@ -1300,8 +1306,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		their_per_commitment_point: PublicKey,
 		logger: &L,
 	) where L::Target: Logger {
-		self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx(
-			txid, htlc_outputs, commitment_number, their_per_commitment_point, logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.provide_latest_counterparty_commitment_tx(
+			txid, htlc_outputs, commitment_number, their_per_commitment_point, &logger)
 	}

 	#[cfg(test)]
@@ -1326,8 +1334,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		self.inner.lock().unwrap().provide_payment_preimage(
-			payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.provide_payment_preimage(
+			payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
 	}

 	/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
@@ -1346,7 +1356,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		self.inner.lock().unwrap().update_monitor(updates, broadcaster, fee_estimator, logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.update_monitor(updates, broadcaster, fee_estimator, &logger)
 	}

 	/// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
@@ -1525,7 +1537,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
 	pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
-		self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.get_latest_holder_commitment_txn(&logger)
 	}

 	/// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
@@ -1534,7 +1548,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 	#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
 	pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
-		self.inner.lock().unwrap().unsafe_get_latest_holder_commitment_txn(logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.unsafe_get_latest_holder_commitment_txn(&logger)
 	}

 	/// Processes transactions in a newly connected block, which may result in any of the following:
@@ -1555,15 +1571,17 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		height: u32,
 		broadcaster: B,
 		fee_estimator: F,
-		logger: L,
+		logger: &L,
 	) -> Vec<TransactionOutputs>
 	where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		self.inner.lock().unwrap().block_connected(
-			header, txdata, height, broadcaster, fee_estimator, logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.block_connected(
+			header, txdata, height, broadcaster, fee_estimator, &logger)
 	}

 	/// Determines if the disconnected block contained any transactions of interest and updates
@@ -1574,14 +1592,16 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		height: u32,
 		broadcaster: B,
 		fee_estimator: F,
-		logger: L,
+		logger: &L,
 	) where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		self.inner.lock().unwrap().block_disconnected(
-			header, height, broadcaster, fee_estimator, logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.block_disconnected(
+			header, height, broadcaster, fee_estimator, &logger)
 	}

 	/// Processes transactions confirmed in a block with the given header and height, returning new
@@ -1598,7 +1618,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		height: u32,
 		broadcaster: B,
 		fee_estimator: F,
-		logger: L,
+		logger: &L,
 	) -> Vec<TransactionOutputs>
 	where
 		B::Target: BroadcasterInterface,
@@ -1606,8 +1626,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		L::Target: Logger,
 	{
 		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-		self.inner.lock().unwrap().transactions_confirmed(
-			header, txdata, height, broadcaster, &bounded_fee_estimator, logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.transactions_confirmed(
+			header, txdata, height, broadcaster, &bounded_fee_estimator, &logger)
 	}

 	/// Processes a transaction that was reorganized out of the chain.
@@ -1621,15 +1643,18 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		txid: &Txid,
 		broadcaster: B,
 		fee_estimator: F,
-		logger: L,
+		logger: &L,
 	) where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
 		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-		self.inner.lock().unwrap().transaction_unconfirmed(
-			txid, broadcaster, &bounded_fee_estimator, logger);
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.transaction_unconfirmed(
+			txid, broadcaster, &bounded_fee_estimator, &logger
+		);
 	}

 	/// Updates the monitor with the current best chain tip, returning new outputs to watch. See
@@ -1645,7 +1670,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		height: u32,
 		broadcaster: B,
 		fee_estimator: F,
-		logger: L,
+		logger: &L,
 	) -> Vec<TransactionOutputs>
 	where
 		B::Target: BroadcasterInterface,
@@ -1653,8 +1678,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		L::Target: Logger,
 	{
 		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-		self.inner.lock().unwrap().best_block_updated(
-			header, height, broadcaster, &bounded_fee_estimator, logger)
+		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
+		inner.best_block_updated(
+			header, height, broadcaster, &bounded_fee_estimator, &logger
+		)
 	}

 	/// Returns the set of txids that should be monitored for re-organization out of the chain.
@@ -1682,7 +1710,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 	/// invoking this every 30 seconds, or lower if running in an environment with spotty
 	/// connections, like on mobile.
 	pub fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
-		&self, broadcaster: B, fee_estimator: F, logger: L,
+		&self, broadcaster: B, fee_estimator: F, logger: &L,
 	)
 	where
 		B::Target: BroadcasterInterface,
@@ -1691,6 +1719,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 	{
 		let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
 		let mut inner = self.inner.lock().unwrap();
+		let logger = WithChannelMonitor::from_impl(logger, &*inner);
 		let current_height = inner.best_block.height;
 		inner.onchain_tx_handler.rebroadcast_pending_claims(
 			current_height, &broadcaster, &fee_estimator, &logger,
@@ -2399,13 +2428,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		Ok(())
 	}

-	pub(crate) fn provide_initial_counterparty_commitment_tx<L: Deref>(
+	fn provide_initial_counterparty_commitment_tx<L: Deref>(
 		&mut self, txid: Txid,
 		htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
 		commitment_number: u64, their_per_commitment_point: PublicKey, feerate_per_kw: u32,
-		to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &L
-	)
-	where L::Target: Logger
-	{
+		to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &WithChannelMonitor<L>,
+	) where L::Target: Logger {
 		self.initial_counterparty_commitment_info = Some((their_per_commitment_point.clone(),
 			feerate_per_kw, to_broadcaster_value, to_countersignatory_value));

@@ -2418,7 +2445,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			their_per_commitment_point, logger);
 	}

-	pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_per_commitment_point: PublicKey, logger: &L) where L::Target: Logger {
+	fn provide_latest_counterparty_commitment_tx<L: Deref>(
+		&mut self, txid: Txid,
+		htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
+		commitment_number: u64, their_per_commitment_point: PublicKey, logger: &WithChannelMonitor<L>,
+	) where L::Target: Logger {
 		// TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
 		// so that a remote monitor doesn't learn anything unless there is a malicious close.
 		// (only maybe, sadly we cant do the same for local info, as we need to be aware of
@@ -2551,7 +2582,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	/// commitment_tx_infos which contain the payment hash have been revoked.
 	fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
 		&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
-		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>)
 	where B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
@@ -2628,9 +2659,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		}
 	}

-	pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
+	fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &WithChannelMonitor<L>)
 		where B::Target: BroadcasterInterface,
-			L::Target: Logger,
+		L::Target: Logger,
 	{
 		let commit_txs = self.get_latest_holder_commitment_txn(logger);
 		let mut txs = vec![];
@@ -2642,7 +2673,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
 	}

-	pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
+	fn update_monitor<B: Deref, F: Deref, L: Deref>(
+		&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor<L>
+	) -> Result<(), ()>
 	where B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
@@ -2787,15 +2820,15 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		} else { ret }
 	}

-	pub fn get_latest_update_id(&self) -> u64 {
+	fn get_latest_update_id(&self) -> u64 {
 		self.latest_update_id
 	}

-	pub fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
+	fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
 		&self.funding_info
 	}

-	pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
+	fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
 		// If we've detected a counterparty commitment tx on chain, we must include it in the set
 		// of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
 		// its trivial to do, double-check that here.
@@ -2805,7 +2838,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		&self.outputs_to_watch
 	}

-	pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
+	fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
 		let mut ret = Vec::new();
 		mem::swap(&mut ret, &mut self.pending_monitor_events);
 		ret
@@ -2880,7 +2913,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		ret
 	}

-	pub(crate) fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
+	fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
 		let (their_per_commitment_point, feerate_per_kw, to_broadcaster_value,
 			to_countersignatory_value) = self.initial_counterparty_commitment_info?;
 		let htlc_outputs = vec![];
@@ -2914,7 +2947,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			channel_parameters)
 	}

-	pub(crate) fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
+	fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
 		update.updates.iter().filter_map(|update| {
 			match update {
 				&ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid,
@@ -2940,7 +2973,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		}).collect()
 	}

-	pub(crate) fn sign_to_local_justice_tx(
+	fn sign_to_local_justice_tx(
 		&self, mut justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64
 	) -> Result<Transaction, ()> {
 		let secret = self.get_secret(commitment_number).ok_or(())?;
@@ -2968,15 +3001,15 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		self.commitment_secrets.get_secret(idx)
 	}

-	pub(crate) fn get_min_seen_secret(&self) -> u64 {
+	fn get_min_seen_secret(&self) -> u64 {
 		self.commitment_secrets.get_min_seen_secret()
 	}

-	pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 {
+	fn get_cur_counterparty_commitment_number(&self) -> u64 {
 		self.current_counterparty_commitment_number
 	}

-	pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 {
+	fn get_cur_holder_commitment_number(&self) -> u64 {
 		self.current_holder_commitment_number
 	}

@@ -3323,7 +3356,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		}
 	}

-	pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+	fn get_latest_holder_commitment_txn<L: Deref>(
+		&mut self, logger: &WithChannelMonitor<L>,
+	) -> Vec<Transaction> where L::Target: Logger {
 		log_debug!(logger, "Getting signed latest holder commitment transaction!");
 		self.holder_tx_signed = true;
 		let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
@@ -3362,7 +3397,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
 	/// Note that this includes possibly-locktimed-in-the-future transactions!
-	fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+	fn unsafe_get_latest_holder_commitment_txn<L: Deref>(
+		&mut self, logger: &WithChannelMonitor<L>
+	) -> Vec<Transaction> where L::Target: Logger {
 		log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
 		let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
 		let txid = commitment_tx.txid();
@@ -3389,10 +3426,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		holder_transactions
 	}

-	pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L) -> Vec<TransactionOutputs>
+	fn block_connected<B: Deref, F: Deref, L: Deref>(
+		&mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B,
+		fee_estimator: F, logger: &WithChannelMonitor<L>,
+	) -> Vec<TransactionOutputs>
 		where B::Target: BroadcasterInterface,
-			F::Target: FeeEstimator,
-			L::Target: Logger,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
 	{
 		let block_hash = header.block_hash();
 		self.best_block = BestBlock::new(block_hash, height);
@@ -3407,7 +3447,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		height: u32,
 		broadcaster: B,
 		fee_estimator: &LowerBoundedFeeEstimator<F>,
-		logger: L,
+		logger: &WithChannelMonitor<L>,
 	) -> Vec<TransactionOutputs>
 	where
 		B::Target: BroadcasterInterface,
@@ -3418,7 +3458,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	{
 		if height > self.best_block.height() {
 			self.best_block = BestBlock::new(block_hash, height);
-			self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
+			self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
 		} else if block_hash != self.best_block.block_hash() {
 			self.best_block = BestBlock::new(block_hash, height);
 			self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
@@ -3434,7 +3474,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		height: u32,
 		broadcaster: B,
 		fee_estimator: &LowerBoundedFeeEstimator<F>,
-		logger: L,
+		logger: &WithChannelMonitor<L>,
 	) -> Vec<TransactionOutputs>
 	where
 		B::Target: BroadcasterInterface,
@@ -3551,9 +3591,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 					break;
 				}
 			}
-			self.is_resolving_htlc_output(&tx, height, &block_hash, &logger);
+			self.is_resolving_htlc_output(&tx, height, &block_hash, logger);

-			self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, &logger);
+			self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger);
 		}
 	}

@@ -3561,7 +3601,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			self.best_block = BestBlock::new(block_hash, height);
 		}

-		self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
+		self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, logger)
 	}

 	/// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update
@@ -3581,7 +3621,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		mut claimable_outpoints: Vec<PackageTemplate>,
 		broadcaster: &B,
 		fee_estimator: &LowerBoundedFeeEstimator<F>,
-		logger: &L,
+		logger: &WithChannelMonitor<L>,
 	) -> Vec<TransactionOutputs>
 	where
 		B::Target: BroadcasterInterface,
@@ -3726,10 +3766,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		watch_outputs
 	}

-	pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: L)
-		where B::Target: BroadcasterInterface,
-			F::Target: FeeEstimator,
-			L::Target: Logger,
+	fn block_disconnected<B: Deref, F: Deref, L: Deref>(
+		&mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor<L>
+	) where B::Target: BroadcasterInterface,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
 	{
 		log_trace!(logger, "Block {} at height {} disconnected", header.block_hash(), height);

@@ -3749,7 +3790,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		txid: &Txid,
 		broadcaster: B,
 		fee_estimator: &LowerBoundedFeeEstimator<F>,
-		logger: L,
+		logger: &WithChannelMonitor<L>,
 	) where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
@@ -3828,7 +3869,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		false
 	}

-	fn should_broadcast_holder_commitment_txn<L: Deref>(&self, logger: &L) -> bool where L::Target: Logger {
+	fn should_broadcast_holder_commitment_txn<L: Deref>(
+		&self, logger: &WithChannelMonitor<L>
+	) -> bool where L::Target: Logger {
 		// There's no need to broadcast our commitment transaction if we've seen one confirmed (even
 		// with 1 confirmation) as it'll be rejected as duplicate/conflicting.
 		if self.funding_spend_confirmed.is_some() ||
@@ -3904,7 +3947,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	/// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder
 	/// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
-	fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
+	fn is_resolving_htlc_output<L: Deref>(
+		&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
+	) where L::Target: Logger {
 		'outer_loop: for input in &tx.input {
 			let mut payment_data = None;
 			let htlc_claim = HTLCClaim::from_witness(&input.witness);
@@ -4145,7 +4190,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	/// Checks if the confirmed transaction is paying funds back to some address we can assume to
 	/// own.
 	fn check_tx_and_push_spendable_outputs<L: Deref>(
-		&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L,
+		&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
 	) where L::Target: Logger {
 		for spendable_output in self.get_spendable_outputs(tx) {
 			let entry = OnchainEventEntry {
@@ -4168,11 +4213,11 @@ where L::Target: Logger,
 {
 	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
-		self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+		self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &self.3);
 	}

 	fn block_disconnected(&self, header: &Header, height: u32) {
-		self.0.block_disconnected(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+		self.0.block_disconnected(header, height, &*self.1, &*self.2, &self.3);
 	}
 }

@@ -4184,15 +4229,15 @@ where L::Target: Logger,
 {
 	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
-		self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+		self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &self.3);
 	}

 	fn transaction_unconfirmed(&self, txid: &Txid) {
-		self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+		self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &self.3);
 	}

 	fn best_block_updated(&self, header: &Header, height: u32) {
-		self.0.best_block_updated(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+		self.0.best_block_updated(header, height, &*self.1, &*self.2, &self.3);
 	}

 	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index 2871abbc223..bbed782bb57 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -473,14 +473,13 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 	/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
 	/// invoking this every 30 seconds, or lower if running in an environment with spotty
 	/// connections, like on mobile.
-	pub(crate) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
+	pub(super) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Logger>(
 		&mut self, current_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>,
 		logger: &L,
 	) where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
-		L::Target: Logger,
 	{
 		let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len());
 		for (claim_id, request) in self.pending_claim_requests.iter() {
@@ -528,13 +527,11 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 	///
 	/// Panics if there are signing errors, because signing operations in reaction to on-chain
 	/// events are not expected to fail, and if they do, we may lose funds.
-	fn generate_claim<F: Deref, L: Deref>(
+	fn generate_claim<F: Deref, L: Logger>(
 		&mut self, cur_height: u32, cached_request: &PackageTemplate, force_feerate_bump: bool,
 		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
 	) -> Option<(u32, u64, OnchainClaim)>
-	where
-		F::Target: FeeEstimator,
-		L::Target: Logger,
+	where F::Target: FeeEstimator,
 	{
 		let request_outpoints = cached_request.outpoints();
 		if request_outpoints.is_empty() {
@@ -688,13 +685,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 	/// `conf_height` represents the height at which the request was generated. This
 	/// does not need to equal the current blockchain tip height, which should be provided via
 	/// `cur_height`, however it must never be higher than `cur_height`.
-	pub(crate) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Deref>(
+	pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
 		&mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
 		broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
 	) where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
-		L::Target: Logger,
 	{
 		log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
 		let mut preprocessed_requests = Vec::with_capacity(requests.len());
@@ -809,13 +805,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 	/// `conf_height` represents the height at which the transactions in `txn_matched` were
 	/// confirmed. This does not need to equal the current blockchain tip height, which should be
 	/// provided via `cur_height`, however it must never be higher than `cur_height`.
-	pub(crate) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Deref>(
+	pub(super) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Logger>(
 		&mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash,
 		cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
 	) where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
-		L::Target: Logger,
 	{
 		log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
 		let mut bump_candidates = HashMap::new();
@@ -977,16 +972,15 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 		}
 	}

-	pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
+	pub(super) fn transaction_unconfirmed<B: Deref, F: Deref, L: Logger>(
 		&mut self,
 		txid: &Txid,
 		broadcaster: B,
 		fee_estimator: &LowerBoundedFeeEstimator<F>,
-		logger: L,
+		logger: &L,
 	) where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
-		L::Target: Logger,
 	{
 		let mut height = None;
 		for entry in self.onchain_events_awaiting_threshold_conf.iter() {
@@ -1001,10 +995,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 		}
 	}

-	pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L)
+	pub(super) fn block_disconnected<B: Deref, F: Deref, L: Logger>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
 		where B::Target: BroadcasterInterface,
-			F::Target: FeeEstimator,
-			L::Target: Logger,
+		F::Target: FeeEstimator,
 	{
 		let mut bump_candidates = HashMap::new();
 		let onchain_events_awaiting_threshold_conf =
@@ -1034,7 +1027,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 			// `height` is the height being disconnected, so our `current_height` is 1 lower.
 			let current_height = height - 1;
 			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
-				current_height, &request, true /* force_feerate_bump */, fee_estimator, &&*logger
+				current_height, &request, true /* force_feerate_bump */, fee_estimator, logger
 			) {
 				request.set_timer(new_timer);
 				request.set_feerate(new_feerate);
diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs
index 7c488816048..efc32bf7d40 100644
--- a/lightning/src/chain/package.rs
+++ b/lightning/src/chain/package.rs
@@ -908,10 +908,10 @@ impl PackageTemplate {
 		}
 		htlcs
 	}
-	pub(crate) fn finalize_malleable_package<L: Deref, Signer: WriteableEcdsaChannelSigner>(
+	pub(crate) fn finalize_malleable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
 		&self, current_height: u32, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64,
 		destination_script: ScriptBuf, logger: &L
-	) -> Option<Transaction> where L::Target: Logger {
+	) -> Option<Transaction> {
 		debug_assert!(self.is_malleable());
 		let mut bumped_tx = Transaction {
 			version: 2,
@@ -932,9 +932,9 @@ impl PackageTemplate {
 		log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
 		Some(bumped_tx)
 	}
-	pub(crate) fn finalize_untractable_package<L: Deref, Signer: WriteableEcdsaChannelSigner>(
+	pub(crate) fn finalize_untractable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
 		&self, onchain_handler: &mut OnchainTxHandler<Signer>, logger: &L,
-	) -> Option<Transaction> where L::Target: Logger {
+	) -> Option<Transaction> {
 		debug_assert!(!self.is_malleable());
 		if let Some((outpoint, outp)) = self.inputs.first() {
 			if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
@@ -962,13 +962,11 @@ impl PackageTemplate {
 	/// Returns value in satoshis to be included as package outgoing output amount and feerate
 	/// which was used to generate the value. Will not return less than `dust_limit_sats` for the
 	/// value.
-	pub(crate) fn compute_package_output<F: Deref, L: Deref>(
+	pub(crate) fn compute_package_output<F: Deref, L: Logger>(
 		&self, predicted_weight: u64, dust_limit_sats: u64, force_feerate_bump: bool,
 		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
 	) -> Option<(u64, u64)>
-	where
-		F::Target: FeeEstimator,
-		L::Target: Logger,
+	where F::Target: FeeEstimator,
 	{
 		debug_assert!(self.malleability == PackageMalleability::Malleable, "The package output is fixed for non-malleable packages");
 		let input_amounts = self.package_amount();
@@ -1111,9 +1109,8 @@ impl Readable for PackageTemplate {
 ///
 /// [`OnChainSweep`]: crate::chain::chaininterface::ConfirmationTarget::OnChainSweep
 /// [`FEERATE_FLOOR_SATS_PER_KW`]: crate::chain::chaininterface::MIN_RELAY_FEE_SAT_PER_1000_WEIGHT
-fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predicted_weight: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
+fn compute_fee_from_spent_amounts<F: Deref, L: Logger>(input_amounts: u64, predicted_weight: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
 	where F::Target: FeeEstimator,
-		L::Target: Logger,
 {
 	let sweep_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep);
 	let fee_rate = cmp::min(sweep_feerate, compute_feerate_sat_per_1000_weight(input_amounts / 2, predicted_weight));
@@ -1135,13 +1132,12 @@
 /// feerate, or just use the previous feerate otherwise. If a feerate bump did happen, we also
 /// verify that those bumping heuristics respect BIP125 rules 3) and 4) and if required adjust the
 /// new fee to meet the RBF policy requirement.
-fn feerate_bump<F: Deref, L: Deref>(
+fn feerate_bump<F: Deref, L: Logger>(
 	predicted_weight: u64, input_amounts: u64, previous_feerate: u64, force_feerate_bump: bool,
 	fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
 ) -> Option<(u64, u64)>
 	where F::Target: FeeEstimator,
-		L::Target: Logger,
 {
 	// If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
 	let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index f01454bd5bd..117986acf10 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -35,7 +35,7 @@ use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
 use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
@@ -2783,14 +2783,13 @@ impl<SP: Deref> Channel<SP> where
 			funding_redeemscript.clone(), self.context.channel_value_satoshis,
 			obscure_factor,
 			holder_commitment_tx, best_block, self.context.counterparty_node_id);

-		let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
 		channel_monitor.provide_initial_counterparty_commitment_tx(
 			counterparty_initial_bitcoin_tx.txid, Vec::new(),
 			self.context.cur_counterparty_commitment_transaction_number,
 			self.context.counterparty_cur_commitment_point.unwrap(),
 			counterparty_initial_commitment_tx.feerate_per_kw(),
 			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
+			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

 		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
 		if self.context.is_batch_funding() {
@@ -6996,13 +6995,12 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 			funding_redeemscript.clone(), self.context.channel_value_satoshis,
 			obscure_factor,
 			holder_commitment_tx, best_block, self.context.counterparty_node_id);

-		let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
 		channel_monitor.provide_initial_counterparty_commitment_tx(
 			counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
 			self.context.cur_counterparty_commitment_transaction_number + 1,
 			self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
 			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
+			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

 		log_info!(logger, "{} funding_signed for peer for channel {}",
 			if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 796f823e5f7..ff1bd164a2f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3271,7 +3271,7 @@ where
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
 	fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
 		let logger = WithChannelContext::from(&self.logger, &chan.context);
-		log_trace!(logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+		log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
 		let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
 			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
 			Some(id) => id,
@@ -3282,7 +3282,7 @@ where

 	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
 		let logger = WithChannelContext::from(&self.logger, &chan.context);
-		log_trace!(logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+		log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
 		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];

 		let enabled = chan.context.is_usable() && match chan.channel_update_status() {
@@ -7061,7 +7061,7 @@ where
 			},
 			hash_map::Entry::Vacant(_) => {
 				log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
-					log_bytes!(msg.channel_id.0));
+					msg.channel_id);
 				// Unfortunately, lnd doesn't force close on errors
 				// (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
 				// One of the few ways to get an lnd counterparty to force close is by
@@ -10499,7 +10499,7 @@ where
 			hash_map::Entry::Occupied(mut entry) => {
 				let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
 				log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
-					if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
+					if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), htlc.payment_hash);
 			},
 			hash_map::Entry::Vacant(entry) => {
 				let path_fee = path.fee_msat();
@@ -10785,8 +10785,7 @@ where
 					}
 				}
 				if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
-					let logger = WithChannelMonitor::from(&args.logger, previous_hop_monitor);
-					previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &&logger);
+					previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
 				}
 			}
 			pending_events_read.push_back((events::Event::PaymentClaimed {
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index f061772890b..282e0a75e82 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1286,8 +1286,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 		macro_rules! try_potential_handleerror {
-			($peer: expr, $thing: expr) => {
-				match $thing {
+			($peer: expr, $thing: expr) => {{
+				let res = $thing;
+				let logger = WithContext::from(&self.logger, peer_node_id.map(|(id, _)| id), None);
+				match res {
 					Ok(x) => x,
 					Err(e) => {
 						match e.action {
@@ -1297,7 +1299,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 								// message send is optimistically buffered, and we
 								// instead log the error and disconnect the peer.
-								log_trace!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+								log_trace!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
 								return Err(PeerHandleError { });
 							},
 							msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {
@@ -1306,32 +1308,32 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 							msgs::ErrorAction::IgnoreAndLog(level) => {
-								log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+								log_given_level!(logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
 								continue
 							},
 							msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
 							msgs::ErrorAction::IgnoreError => {
-								log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+								log_debug!(logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
 								continue;
 							},
 							msgs::ErrorAction::SendErrorMessage { msg } => {
-								log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+								log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
 								self.enqueue_message($peer, &msg);
 								continue;
 							},
 							msgs::ErrorAction::SendWarningMessage { msg, log_level } => {
-								log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+								log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
 								self.enqueue_message($peer, &msg);
 								continue;
 							},
 						}
 					}
 				}
-			}
+			}}
 		}

 		let mut peer_lock = peer_mutex.lock().unwrap();
@@ -2005,25 +2007,29 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
 				MessageSendEvent::SendStfu { ref node_id, ref msg} => {
-					log_debug!(self.logger, "Handling SendStfu event in peer_handler for node {} for channel {}",
+					let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+					log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}",
 							log_pubkey!(node_id), &msg.channel_id);
 					self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
 				}
 				MessageSendEvent::SendSplice { ref node_id, ref msg} => {
-					log_debug!(self.logger, "Handling SendSplice event in peer_handler for node {} for channel {}",
+					let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+					log_debug!(logger, "Handling SendSplice event in peer_handler for node {} for channel {}",
 							log_pubkey!(node_id), &msg.channel_id);
 					self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
 				}
 				MessageSendEvent::SendSpliceAck { ref node_id, ref msg} => {
-					log_debug!(self.logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}",
+					let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+					log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}",
 							log_pubkey!(node_id), &msg.channel_id);
 					self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
 				}
 				MessageSendEvent::SendSpliceLocked { ref node_id, ref msg} => {
-					log_debug!(self.logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}",
+					let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+					log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}",
 							log_pubkey!(node_id), &msg.channel_id);
					self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
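
Reviewer note: the recurring change in channelmonitor.rs is a locking refactor. Previously, `WithChannelMonitor::from(&logger, &monitor)` took the monitor's inner mutex a second time just to read the counterparty node id and funding outpoint for log decoration. Each `ChannelMonitor` wrapper method now locks `inner` once, derives the decorated logger from the already-held state via `from_impl`, and threads `&logger` down. A minimal, self-contained sketch of that shape, with illustrative names (`Monitor`, `InnerState`, `MonitorLogger`) standing in for LDK's `ChannelMonitor`/`ChannelMonitorImpl`/`WithChannelMonitor`, not the real API:

```rust
use std::sync::Mutex;

trait Logger {
    fn log(&self, msg: &str);
}

struct StdoutLogger;
impl Logger for StdoutLogger {
    fn log(&self, msg: &str) {
        println!("{msg}");
    }
}

// Stand-in for ChannelMonitorImpl: the state behind the monitor's mutex.
struct InnerState {
    channel_id: u64,
}

// Stand-in for WithChannelMonitor: tags every log line with channel context.
struct MonitorLogger<'a, L: Logger> {
    inner: &'a L,
    channel_id: u64,
}

impl<'a, L: Logger> Logger for MonitorLogger<'a, L> {
    fn log(&self, msg: &str) {
        self.inner.log(&format!("[chan {}] {}", self.channel_id, msg));
    }
}

impl<'a, L: Logger> MonitorLogger<'a, L> {
    // Analogue of WithChannelMonitor::from_impl: built from state the caller
    // has already locked, so no second mutex acquisition is needed.
    fn from_impl(logger: &'a L, state: &InnerState) -> Self {
        MonitorLogger { inner: logger, channel_id: state.channel_id }
    }
}

struct Monitor {
    inner: Mutex<InnerState>,
}

impl Monitor {
    fn block_connected<L: Logger>(&self, height: u32, logger: &L) {
        // Lock once, derive the decorated logger from the held guard, then
        // pass it down by reference: the shape this diff gives ChannelMonitor.
        let inner = self.inner.lock().unwrap();
        let logger = MonitorLogger::from_impl(logger, &*inner);
        logger.log(&format!("connected block at height {height}"));
    }
}

fn main() {
    let monitor = Monitor { inner: Mutex::new(InnerState { channel_id: 42 }) };
    monitor.block_connected(100, &StdoutLogger);
}
```

This is also why the chainmonitor.rs hunks can pass `&self.logger` straight through: the per-channel decoration now happens inside the monitor, after the lock is taken.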
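The onchaintx.rs and package.rs hunks make a related signature change: internal helpers drop the `L: Deref` plus `L::Target: Logger` indirection and bound `L: Logger` directly, which lets callers hand in `&WithChannelMonitor<..>` (itself a `Logger`) and is what removes the `&&*logger` contortion in `block_disconnected`. A sketch of the two styles, under the same illustrative-names assumption as above:

```rust
use std::ops::Deref;

trait Logger {
    fn log(&self, msg: &str);
}

struct StdoutLogger;
impl Logger for StdoutLogger {
    fn log(&self, msg: &str) { println!("{msg}"); }
}

// Old style: generic over a smart pointer whose target logs.
fn old_style<L: Deref>(logger: &L) where L::Target: Logger {
    logger.log("old style");
}

// New style: generic over anything that logs, including plain references
// and decorator types like the WithChannelMonitor wrapper.
fn new_style<L: Logger>(logger: &L) {
    logger.log("new style");
}

fn main() {
    let owned: Box<StdoutLogger> = Box::new(StdoutLogger);
    old_style(&owned);        // needs a Deref with Target: Logger, e.g. Box
    new_style(&*owned);       // takes the Logger value itself
    new_style(&StdoutLogger); // or any other Logger directly
}
```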
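Finally, one chainmonitor.rs hunk adds `core::mem::drop(pending_monitor_updates)` before poisoning the monitors lock on `UnrecoverableError`. The trick relies on std lock poisoning: every outstanding guard must be released before taking the write lock (an `RwLock` is not reentrant, so the thread would otherwise deadlock against itself), and panicking while the write guard is held poisons the lock so all later operations fail fast. A simplified sketch of the mechanism, not the actual chainmonitor code:

```rust
use std::sync::RwLock;

// Hypothetical stand-in for the monitors map guarded inside ChainMonitor.
fn persist_failed_unrecoverably(monitors: &RwLock<Vec<u32>>) {
    let monitors_read = monitors.read().unwrap();
    // ... persistence reports an unrecoverable error ...
    // Drop every read guard first; holding one while requesting the write
    // lock on the same RwLock would deadlock this thread.
    core::mem::drop(monitors_read);
    // Panic while the write guard is held: the RwLock is poisoned, and every
    // future lock() call returns Err, making subsequent operations fail fast.
    let _poison = monitors.write().unwrap();
    panic!("ChannelMonitor[Update] persistence failed unrecoverably");
}
```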