Merge pull request #2753 from TheBlueMatt/2023-11-inbound-preimages
author Wilmer Paulino <9447167+wpaulino@users.noreply.github.com>
Mon, 4 Dec 2023 21:15:10 +0000 (13:15 -0800)
committer GitHub <noreply@github.com>
Mon, 4 Dec 2023 21:15:10 +0000 (13:15 -0800)
Provide inbound HTLC preimages to the `EcdsaChannelSigner`
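This merge's headline change extends `EcdsaChannelSigner` so that, when countersigning a counterparty commitment, the signer is also handed the preimages for any inbound HTLCs being claimed, letting external or policy-enforcing signers verify claims before signing. A rough sketch of the kind of check this enables (a hypothetical helper, not the trait's actual signature):

```rust
use bitcoin::hashes::{sha256, Hash};

/// Hypothetical policy check: only countersign a commitment claiming
/// inbound HTLCs if we hold a matching preimage for every claimed
/// payment hash (a preimage matches iff SHA256(preimage) == payment_hash).
fn have_all_preimages(preimages: &[[u8; 32]], claimed_hashes: &[[u8; 32]]) -> bool {
	claimed_hashes.iter().all(|hash| {
		preimages.iter().any(|p| sha256::Hash::hash(p).to_byte_array() == *hash)
	})
}
```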

33 files changed:
fuzz/src/full_stack.rs
fuzz/src/onion_message.rs
fuzz/src/utils/test_logger.rs
lightning-background-processor/src/lib.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/src/lib.rs
lightning-rapid-gossip-sync/src/lib.rs
lightning/src/blinded_path/payment.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/events/mod.rs
lightning/src/ln/blinded_payment_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_payment.rs [new file with mode: 0644]
lightning/src/ln/onion_utils.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/onion_message/messenger.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/sign/ecdsa.rs
lightning/src/sign/mod.rs
lightning/src/sign/taproot.rs
lightning/src/util/logger.rs
lightning/src/util/macro_logger.rs
lightning/src/util/test_channel_signer.rs
lightning/src/util/test_utils.rs
pending_changelog/route-blinding-intro-node.txt [new file with mode: 0644]

diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 57c78e76d6bcd675f41cb437613981129dc6469c..725f83af9842caf79e013cf0b1d6edb1bb985d72 100644
@@ -728,7 +728,7 @@ mod tests {
                pub lines: Mutex<HashMap<(String, String), usize>>,
        }
        impl Logger for TrackingLogger {
-               fn log(&self, record: &Record) {
+               fn log(&self, record: Record) {
                        *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
                        println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
                }
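The fuzz and test loggers above all pick up the other API change in this merge: `Logger::log` now takes its `Record` by value. A minimal standalone implementation of the new shape, mirroring the ones in these diffs:

```rust
use lightning::util::logger::{Logger, Record};

struct PrintLogger;

impl Logger for PrintLogger {
	// `record` now arrives by value, which is what lets the decorator
	// types later in this merge mutate its context fields (peer_id,
	// channel_id) before forwarding it to an inner logger.
	fn log(&self, record: Record) {
		println!("{:<5} [{} : {}] {}",
			record.level.to_string(), record.module_path, record.line, record.args);
	}
}
```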
diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs
index 13de5005cf8be1e2fee35f9fc501366bd4e46434..54d15324f3b4e55878a8bf37b89d5777d6cb6f55 100644
@@ -218,7 +218,7 @@ mod tests {
                pub lines: Mutex<HashMap<(String, String), usize>>,
        }
        impl Logger for TrackingLogger {
-               fn log(&self, record: &Record) {
+               fn log(&self, record: Record) {
                        *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
                        println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
                }
diff --git a/fuzz/src/utils/test_logger.rs b/fuzz/src/utils/test_logger.rs
index f8c96f99bd1c8ddb9c1f9b28d8d7b5fbe67cc27b..5e5817e23f1f92d18f5d83406db2cff23319491d 100644
@@ -56,7 +56,7 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> {
 }
 
 impl<Out: Output> Logger for TestLogger<Out> {
-       fn log(&self, record: &Record) {
+       fn log(&self, record: Record) {
                write!(LockedWriteAdapter(&self.out),
                        "{:<5} {} [{} : {}] {}\n", record.level.to_string(), self.id, record.module_path, record.line, record.args)
                        .unwrap();
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index e4f2ebfb6a9443afe7bfe36f7bb75dbdb162c3ab..95796e8608682842743e4da0d41cb2d96e673fa7 100644
@@ -1375,9 +1375,9 @@ mod tests {
                        let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
                        let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
                        let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
-                       if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
-                               log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
-                               log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
+                       if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
+                               log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
+                               log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() {
                                break
                        }
                }
@@ -1556,7 +1556,7 @@ mod tests {
                loop {
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
                        let expected_log = "Persisting scorer".to_string();
-                       if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
+                       if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
                                break
                        }
                }
@@ -1580,7 +1580,7 @@ mod tests {
                                $sleep;
                                let log_entries = $nodes[0].logger.lines.lock().unwrap();
                                let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
-                               if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+                               if *log_entries.get(&("lightning_background_processor", loop_counter))
                                        .unwrap_or(&0) > 1
                                {
                                        // Wait until the loop has gone around at least twice.
@@ -1792,7 +1792,7 @@ mod tests {
 
                let log_entries = nodes[0].logger.lines.lock().unwrap();
                let expected_log = "Persisting scorer after update".to_string();
-               assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+               assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
        }
 
        #[tokio::test]
@@ -1838,7 +1838,7 @@ mod tests {
 
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
                        let expected_log = "Persisting scorer after update".to_string();
-                       assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+                       assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
                });
 
                let (r1, r2) = tokio::join!(t1, t2);
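The dropped `.to_string()` calls in these test hunks work because `Record::module_path` is now a `&'static str` and, assuming the `TestLogger` used here keys its `lines` map as `(&'static str, String)` (unlike the fuzz `TrackingLogger` above, which still keys on `(String, String)` and so keeps its conversions), lookups no longer need an owned module-path `String`. A standalone illustration of that assumed key type:

```rust
use std::collections::HashMap;

fn main() {
	let mut lines: HashMap<(&'static str, String), usize> = HashMap::new();
	let key = ("lightning_background_processor", "Persisting scorer".to_string());
	*lines.entry(key).or_insert(0) += 1;
	// The module-path side of the key is borrowed, not owned.
	let n = lines.get(&("lightning_background_processor", "Persisting scorer".to_string()));
	assert_eq!(n, Some(&1));
}
```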
diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs
index ed016be3f8eded216c4a35dafe648a44fc7c0550..1248221a32ee3b800d8ec3fec9964d82649d9791 100644
@@ -14,7 +14,7 @@ use lightning::ln::channelmanager::{PhantomRouteHints, MIN_CLTV_EXPIRY_DELTA};
 use lightning::ln::inbound_payment::{create, create_from_hash, ExpandedKey};
 use lightning::routing::gossip::RoutingFees;
 use lightning::routing::router::{RouteHint, RouteHintHop, Router};
-use lightning::util::logger::Logger;
+use lightning::util::logger::{Logger, Record};
 use secp256k1::PublicKey;
 use core::ops::Deref;
 use core::time::Duration;
@@ -626,6 +626,7 @@ where
 
        log_trace!(logger, "Considering {} channels for invoice route hints", channels.len());
        for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) {
+               let logger = WithChannelDetails::from(logger, &channel);
                if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() {
                        log_trace!(logger, "Ignoring channel {} for invoice route hints", &channel.channel_id);
                        continue;
@@ -710,6 +711,7 @@ where
                .into_iter()
                .map(|(_, channel)| channel)
                .filter(|channel| {
+                       let logger = WithChannelDetails::from(logger, &channel);
                        let has_enough_capacity = channel.inbound_capacity_msat >= min_inbound_capacity;
                        let include_channel = if has_pub_unconf_chan {
                                // If we have a public channel, but it doesn't have enough confirmations to (yet)
@@ -790,6 +792,28 @@ fn prefer_current_channel(min_inbound_capacity_msat: Option<u64>, current_channe
        current_channel > candidate_channel
 }
 
+/// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`].
+struct WithChannelDetails<'a, 'b, L: Deref> where L::Target: Logger {
+       /// The logger to delegate to after adding context to the record.
+       logger: &'a L,
+       /// The [`ChannelDetails`] for adding relevant context to the logged record.
+       details: &'b ChannelDetails
+}
+
+impl<'a, 'b, L: Deref> Logger for WithChannelDetails<'a, 'b, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               record.peer_id = Some(self.details.counterparty.node_id);
+               record.channel_id = Some(self.details.channel_id);
+               self.logger.log(record)
+       }
+}
+
+impl<'a, 'b, L: Deref> WithChannelDetails<'a, 'b, L> where L::Target: Logger {
+       fn from(logger: &'a L, details: &'b ChannelDetails) -> Self {
+               Self { logger, details }
+       }
+}
+
 #[cfg(test)]
 mod test {
        use core::cell::RefCell;
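`WithChannelDetails` is the decorator shape this merge applies across the codebase (see also `WithChannelMonitor` below): borrow an inner logger, stamp per-channel context onto each `Record`, and delegate. A dependency-free sketch of the same pattern, with all names invented here for illustration:

```rust
trait Log {
	fn log(&self, msg: String);
}

struct Stdout;
impl Log for Stdout {
	fn log(&self, msg: String) { println!("{msg}"); }
}

/// Borrows the inner logger and prefixes every message with channel
/// context, mirroring how WithChannelDetails sets peer_id/channel_id
/// on the Record before delegating.
struct WithChannel<'a, L: Log> {
	inner: &'a L,
	channel_id: &'a str,
}

impl<'a, L: Log> Log for WithChannel<'a, L> {
	fn log(&self, msg: String) {
		self.inner.log(format!("[chan {}] {}", self.channel_id, msg));
	}
}

fn main() {
	let base = Stdout;
	let logger = WithChannel { inner: &base, channel_id: "abcd1234" };
	logger.log("Considering channel for invoice route hints".into());
}
```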
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index d4f75dd6cd8b073d0e29de1ecf76685b8122ab6e..d02f23fdd2cfd8649e9238cdda5a8fba470ace5c 100644
@@ -571,7 +571,7 @@ mod tests {
 
        pub struct TestLogger();
        impl lightning::util::logger::Logger for TestLogger {
-               fn log(&self, record: &lightning::util::logger::Record) {
+               fn log(&self, record: lightning::util::logger::Record) {
                        println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
                }
        }
diff --git a/lightning-rapid-gossip-sync/src/lib.rs b/lightning-rapid-gossip-sync/src/lib.rs
index 5a61be7990e2a17270b7c61deeb8944952255735..c15eedabbe19b3a8f70d6a15d64c1d2ca705ae6c 100644
@@ -49,7 +49,7 @@
 //! # use lightning::util::logger::{Logger, Record};
 //! # struct FakeLogger {}
 //! # impl Logger for FakeLogger {
-//! #     fn log(&self, record: &Record) { }
+//! #     fn log(&self, record: Record) { }
 //! # }
 //! # let logger = FakeLogger {};
 //!
diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs
index 4edfb7d8de05bf0e0187941cc91bf4f646fe5557..7b604fbdcb1d8af0a6dffbdfd40b1898d7d19d52 100644
@@ -118,21 +118,6 @@ impl Writeable for ReceiveTlvs {
        }
 }
 
-// This will be removed once we support forwarding blinded HTLCs, because we'll always read a
-// `BlindedPaymentTlvs` instead.
-impl Readable for ReceiveTlvs {
-       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
-               _init_and_read_tlv_stream!(r, {
-                       (12, payment_constraints, required),
-                       (65536, payment_secret, required),
-               });
-               Ok(Self {
-                       payment_secret: payment_secret.0.unwrap(),
-                       payment_constraints: payment_constraints.0.unwrap()
-               })
-       }
-}
-
 impl<'a> Writeable for BlindedPaymentTlvsRef<'a> {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                // TODO: write padding
@@ -187,7 +172,7 @@ pub(super) fn blinded_hops<T: secp256k1::Signing + secp256k1::Verification>(
 }
 
 /// `None` if underflow occurs.
-fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
+pub(crate) fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
        let inbound_amt = inbound_amt_msat as u128;
        let base = payment_relay.fee_base_msat as u128;
        let prop = payment_relay.fee_proportional_millionths as u128;
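`amt_to_forward_msat` is made `pub(crate)` so onion handling can invert the forwarding-fee formula: given the amount a blinded hop receives, recover how much it should forward after deducting `fee_base_msat + amt_to_forward * fee_proportional_millionths / 1_000_000`. A simplified standalone version (the real function additionally adjusts rounding so the hop's fee is never underpaid):

```rust
// Simplified sketch: solve inbound = fwd + base + fwd * prop / 1_000_000
// for fwd, returning None if the base fee alone underflows the amount.
fn forward_amt_msat(inbound_msat: u64, base_msat: u64, prop_millionths: u64) -> Option<u64> {
	let after_base = (inbound_msat as u128).checked_sub(base_msat as u128)?;
	let fwd = after_base * 1_000_000 / (1_000_000 + prop_millionths as u128);
	u64::try_from(fwd).ok()
}

fn main() {
	// 1_000 msat base + 1% proportional: receiving 102_000 msat means
	// forwarding 100_000 msat (fee = 1_000 base + 1_000 proportional).
	assert_eq!(forward_amt_msat(102_000, 1_000, 10_000), Some(100_000));
	assert_eq!(forward_amt_msat(500, 1_000, 10_000), None); // underflow
}
```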
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 0b7e13f24b0218235cf690bb8652322d7f7db520..39fa3a237a62f02085e904349dc9555daee2c232 100644
@@ -29,7 +29,7 @@ use bitcoin::hash_types::{Txid, BlockHash};
 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::events;
@@ -359,6 +359,7 @@ where C::Target: chain::Filter,
                process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>
        ) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
                let monitor = &monitor_state.monitor;
+               let logger = WithChannelMonitor::from(&self.logger, &monitor);
                let mut txn_outputs;
                {
                        txn_outputs = process(monitor, txdata);
@@ -375,12 +376,12 @@ where C::Target: chain::Filter,
                                }
                        }
 
-                       log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+                       log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
                        match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
                                ChannelMonitorUpdateStatus::Completed =>
-                                       log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+                                       log_trace!(logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
                                ChannelMonitorUpdateStatus::InProgress => {
-                                       log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+                                       log_debug!(logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
                                        pending_monitor_updates.push(update_id);
                                },
                                ChannelMonitorUpdateStatus::UnrecoverableError => {
@@ -619,8 +620,9 @@ where C::Target: chain::Filter,
        pub fn rebroadcast_pending_claims(&self) {
                let monitors = self.monitors.read().unwrap();
                for (_, monitor_holder) in &*monitors {
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
                        monitor_holder.monitor.rebroadcast_pending_claims(
-                               &*self.broadcaster, &*self.fee_estimator, &*self.logger
+                               &*self.broadcaster, &*self.fee_estimator, &logger
                        )
                }
        }
@@ -638,8 +640,9 @@ where
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
                log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
                self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor);
                        monitor.block_connected(
-                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
                });
        }
 
@@ -647,8 +650,9 @@ where
                let monitor_states = self.monitors.read().unwrap();
                log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
                for monitor_state in monitor_states.values() {
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
                        monitor_state.monitor.block_disconnected(
-                               header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+                               header, height, &*self.broadcaster, &*self.fee_estimator, &logger);
                }
        }
 }
@@ -665,8 +669,9 @@ where
        fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
                log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
                self.process_chain_data(header, None, txdata, |monitor, txdata| {
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor);
                        monitor.transactions_confirmed(
-                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
                });
        }
 
@@ -674,18 +679,20 @@ where
                log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
                let monitor_states = self.monitors.read().unwrap();
                for monitor_state in monitor_states.values() {
-                       monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
+                       monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &logger);
                }
        }
 
        fn best_block_updated(&self, header: &Header, height: u32) {
                log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
                self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor);
                        // While in practice there shouldn't be any recursive calls when given empty txdata,
                        // it's still possible if a chain::Filter implementation returns a transaction.
                        debug_assert!(txdata.is_empty());
                        monitor.best_block_updated(
-                               header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+                               header, height, &*self.broadcaster, &*self.fee_estimator, &logger)
                });
        }
 
@@ -711,29 +718,30 @@ where C::Target: chain::Filter,
            P::Target: Persist<ChannelSigner>,
 {
        fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
+               let logger = WithChannelMonitor::from(&self.logger, &monitor);
                let mut monitors = self.monitors.write().unwrap();
                let entry = match monitors.entry(funding_outpoint) {
                        hash_map::Entry::Occupied(_) => {
-                               log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
+                               log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
                                return Err(());
                        },
                        hash_map::Entry::Vacant(e) => e,
                };
-               log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
+               log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
                let update_id = MonitorUpdateId::from_new_monitor(&monitor);
                let mut pending_monitor_updates = Vec::new();
                let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
                match persist_res {
                        ChannelMonitorUpdateStatus::InProgress => {
-                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
+                               log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
                                pending_monitor_updates.push(update_id);
                        },
                        ChannelMonitorUpdateStatus::Completed => {
-                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
+                               log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
                        },
                        ChannelMonitorUpdateStatus::UnrecoverableError => {
                                let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-                               log_error!(self.logger, "{}", err_str);
+                               log_error!(logger, "{}", err_str);
                                panic!("{}", err_str);
                        },
                }
@@ -750,8 +758,9 @@ where C::Target: chain::Filter,
 
        fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
                // Update the monitor that watches the channel referred to by the given outpoint.
-               let monitors = self.monitors.read().unwrap();
-               let ret = match monitors.get(&funding_txo) {
+               let monitors_lock = self.monitors.read().unwrap();
+               let monitors = monitors_lock.deref();
+               match monitors.get(&funding_txo) {
                        None => {
                                log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");
 
@@ -765,7 +774,8 @@ where C::Target: chain::Filter,
                        },
                        Some(monitor_state) => {
                                let monitor = &monitor_state.monitor;
-                               log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
+                               let logger = WithChannelMonitor::from(&self.logger, &monitor);
+                               log_trace!(logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
                                let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
 
                                let update_id = MonitorUpdateId::from_monitor_update(update);
@@ -776,7 +786,7 @@ where C::Target: chain::Filter,
                                        // We don't want to persist a `monitor_update` which results in a failure to apply later
                                        // while reading `channel_monitor` with updates from storage. Instead, we should persist
                                        // the entire `channel_monitor` here.
-                                       log_warn!(self.logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
+                                       log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
                                        self.persister.update_persisted_channel(funding_txo, None, monitor, update_id)
                                } else {
                                        self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id)
@@ -784,12 +794,20 @@ where C::Target: chain::Filter,
                                match persist_res {
                                        ChannelMonitorUpdateStatus::InProgress => {
                                                pending_monitor_updates.push(update_id);
-                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
+                                               log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
                                        },
                                        ChannelMonitorUpdateStatus::Completed => {
-                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+                                               log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+                                       },
+                                       ChannelMonitorUpdateStatus::UnrecoverableError => {
+                                               // Take the monitors lock for writing so that we poison it and any future
+                                               // operations going forward fail immediately.
+                                               core::mem::drop(monitors_lock);
+                                               let _poison = self.monitors.write().unwrap();
+                                               let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+                                               log_error!(logger, "{}", err_str);
+                                               panic!("{}", err_str);
                                        },
-                                       ChannelMonitorUpdateStatus::UnrecoverableError => { /* we'll panic in a moment */ },
                                }
                                if update_res.is_err() {
                                        ChannelMonitorUpdateStatus::InProgress
@@ -797,28 +815,19 @@ where C::Target: chain::Filter,
                                        persist_res
                                }
                        }
-               };
-               if let ChannelMonitorUpdateStatus::UnrecoverableError = ret {
-                       // Take the monitors lock for writing so that we poison it and any future
-                       // operations going forward fail immediately.
-                       core::mem::drop(monitors);
-                       let _poison = self.monitors.write().unwrap();
-                       let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-                       log_error!(self.logger, "{}", err_str);
-                       panic!("{}", err_str);
                }
-               ret
        }
 
        fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
                for monitor_state in self.monitors.read().unwrap().values() {
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
                        let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
                        if !is_pending_monitor_update || monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize <= self.highest_chain_height.load(Ordering::Acquire) {
                                if is_pending_monitor_update {
-                                       log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
-                                       log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
-                                       log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
+                                       log_error!(logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
+                                       log_error!(logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
+                                       log_error!(logger, "   This may cause duplicate payment events to be generated.");
                                }
                                let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
                                if monitor_events.len() > 0 {
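The relocated `UnrecoverableError` arm relies on std lock poisoning, which is also why `update_channel` now splits the guard (`monitors_lock`) from the borrowed map (`monitors`): the read guard must be dropped before taking the write lock on the same thread, and panicking while holding the write guard poisons the `RwLock` so every later operation fails fast. The idiom in isolation:

```rust
use std::sync::RwLock;

// Drop any read guard you hold on `lock` before calling this, or the
// same-thread write() below would deadlock.
fn poison_and_halt<T>(lock: &RwLock<T>) -> ! {
	let _poison = lock.write().unwrap();
	// Panicking while the write guard is held poisons the lock; every
	// subsequent lock().unwrap() then fails immediately.
	panic!("persistence failed unrecoverably; shutting down");
}
```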
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 62d254a5377d412adb12e9bc9b47863455090641..e0f19f39d4761f415fb7370ba25802b3285325d0 100644
@@ -34,7 +34,7 @@ use bitcoin::secp256k1;
 use bitcoin::sighash::EcdsaSighashType;
 
 use crate::ln::channel::INITIAL_COMMITMENT_NUMBER;
-use crate::ln::{PaymentHash, PaymentPreimage};
+use crate::ln::{PaymentHash, PaymentPreimage, ChannelId};
 use crate::ln::msgs::DecodeError;
 use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint};
 use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
@@ -47,7 +47,7 @@ use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDe
 use crate::chain::onchaintx::{ClaimEvent, OnchainTxHandler};
 use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
 use crate::chain::Filter;
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, Record};
 use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
 use crate::util::byte_utils;
 use crate::events::{Event, EventHandler};
@@ -1125,6 +1125,30 @@ macro_rules! _process_events_body {
 }
 pub(super) use _process_events_body as process_events_body;
 
+pub(crate) struct WithChannelMonitor<'a, L: Deref> where L::Target: Logger {
+       logger: &'a L,
+       peer_id: Option<PublicKey>,
+       channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               record.peer_id = self.peer_id;
+               record.channel_id = self.channel_id;
+               self.logger.log(record)
+       }
+}
+
+impl<'a, 'b, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
+       pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &'b ChannelMonitor<S>) -> Self {
+               WithChannelMonitor {
+                       logger,
+                       peer_id: monitor.get_counterparty_node_id(),
+                       channel_id: Some(monitor.get_funding_txo().0.to_channel_id()),
+               }
+       }
+}
+
 impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// For lockorder enforcement purposes, we need to have a single site which constructs the
        /// `inner` mutex, otherwise cases where we lock two monitors at the same time (eg in our
@@ -4144,11 +4168,11 @@ where
        L::Target: Logger,
 {
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
-               self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &*self.3);
+               self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
        }
 
        fn block_disconnected(&self, header: &Header, height: u32) {
-               self.0.block_disconnected(header, height, &*self.1, &*self.2, &*self.3);
+               self.0.block_disconnected(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
        }
 }
 
@@ -4160,15 +4184,15 @@ where
        L::Target: Logger,
 {
        fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
-               self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &*self.3);
+               self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
        }
 
        fn transaction_unconfirmed(&self, txid: &Txid) {
-               self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &*self.3);
+               self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
        }
 
        fn best_block_updated(&self, header: &Header, height: u32) {
-               self.0.best_block_updated(header, height, &*self.1, &*self.2, &*self.3);
+               self.0.best_block_updated(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
        }
 
        fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
@@ -4501,7 +4525,7 @@ mod tests {
        use super::ChannelMonitorUpdateStep;
        use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err};
        use crate::chain::{BestBlock, Confirm};
-       use crate::chain::channelmonitor::ChannelMonitor;
+       use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor};
        use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
        use crate::chain::transaction::OutPoint;
        use crate::sign::InMemorySigner;
@@ -4514,6 +4538,7 @@ mod tests {
        use crate::util::errors::APIError;
        use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
        use crate::util::ser::{ReadableArgs, Writeable};
+       use crate::util::logger::Logger;
        use crate::sync::{Arc, Mutex};
        use crate::io;
        use crate::ln::features::ChannelTypeFeatures;
@@ -4703,6 +4728,7 @@ mod tests {
 
                let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
                let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);
+
                monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(),
                        htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap();
                monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"1").to_byte_array()),
@@ -4900,5 +4926,62 @@ mod tests {
                }
        }
 
+       #[test]
+       fn test_with_channel_monitor_impl_logger() {
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(TestLogger::new());
+
+               let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+
+               let keys = InMemorySigner::new(
+                       &secp_ctx,
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       [41; 32],
+                       0,
+                       [0; 32],
+                       [0; 32],
+               );
+
+               let counterparty_pubkeys = ChannelPublicKeys {
+                       funding_pubkey: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
+                       revocation_basepoint: RevocationBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap())),
+                       payment_point: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[46; 32]).unwrap()),
+                       delayed_payment_basepoint: DelayedPaymentBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[47; 32]).unwrap())),
+                       htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
+               };
+               let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+               let channel_parameters = ChannelTransactionParameters {
+                       holder_pubkeys: keys.holder_channel_pubkeys.clone(),
+                       holder_selected_contest_delay: 66,
+                       is_outbound_from_holder: true,
+                       counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+                               pubkeys: counterparty_pubkeys,
+                               selected_contest_delay: 67,
+                       }),
+                       funding_outpoint: Some(funding_outpoint),
+                       channel_type_features: ChannelTypeFeatures::only_static_remote_key()
+               };
+               let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let best_block = BestBlock::from_network(Network::Testnet);
+               let monitor = ChannelMonitor::new(Secp256k1::new(), keys,
+                       Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
+                       (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
+                       &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
+                       best_block, dummy_key);
+
+               let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone();
+               let context_logger = WithChannelMonitor::from(&logger, &monitor);
+               log_error!(context_logger, "This is an error");
+               log_warn!(context_logger, "This is an error");
+               log_debug!(context_logger, "This is an error");
+               log_trace!(context_logger, "This is an error");
+               log_gossip!(context_logger, "This is an error");
+               log_info!(context_logger, "This is an error");
+               logger.assert_log_context_contains("lightning::chain::channelmonitor::tests", Some(dummy_key), Some(chan_id), 6);
+       }
        // Further testing is done in the ChannelManager integration tests.
 }
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index adbc7faf7e0f9d0acf1fa3349495733669d41193..4e04a3634e10d3f9152ae6d074bb19365bf989a4 100644
@@ -72,6 +72,16 @@ pub enum PaymentPurpose {
        SpontaneousPayment(PaymentPreimage),
 }
 
+impl PaymentPurpose {
+       /// Returns the preimage for this payment, if it is known.
+       pub fn preimage(&self) -> Option<PaymentPreimage> {
+               match self {
+                       PaymentPurpose::InvoicePayment { payment_preimage, .. } => *payment_preimage,
+                       PaymentPurpose::SpontaneousPayment(preimage) => Some(*preimage),
+               }
+       }
+}
+
 impl_writeable_tlv_based_enum!(PaymentPurpose,
        (0, InvoicePayment) => {
                (0, payment_preimage, option),
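`PaymentPurpose::preimage` lets event consumers treat invoice-based and spontaneous payments uniformly. A hedged usage sketch (event fields abbreviated with `..`):

```rust
use lightning::events::Event;

fn record_claimed_preimage(event: &Event) {
	if let Event::PaymentClaimed { purpose, payment_hash, .. } = event {
		// Some(_) for spontaneous payments and for invoice payments whose
		// preimage is known; None otherwise.
		if let Some(preimage) = purpose.preimage() {
			println!("claimed {:?} with preimage {:?}", payment_hash, preimage);
		}
	}
}
```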
diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs
index e8b6bfd679a43cbd7ec0eb51296d24a5c8dea2cc..635057deab21a3f6321bcdc29526d3610328f94d 100644
@@ -7,17 +7,66 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use bitcoin::secp256k1::Secp256k1;
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
 use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
-use crate::events::MessageSendEventsProvider;
+use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
+use crate::events::{HTLCDestination, MessageSendEventsProvider};
+use crate::ln::PaymentSecret;
 use crate::ln::channelmanager;
 use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::ln::features::BlindedHopFeatures;
 use crate::ln::functional_test_utils::*;
+use crate::ln::msgs;
+use crate::ln::msgs::ChannelMessageHandler;
+use crate::ln::onion_utils;
+use crate::ln::onion_utils::INVALID_ONION_BLINDING;
 use crate::ln::outbound_payment::Retry;
 use crate::prelude::*;
 use crate::routing::router::{PaymentParameters, RouteParameters};
 use crate::util::config::UserConfig;
+use crate::util::test_utils;
+
+pub fn get_blinded_route_parameters(
+       amt_msat: u64, payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
+       channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
+) -> RouteParameters {
+       let mut intermediate_nodes = Vec::new();
+       for (node_id, chan_upd) in node_ids.iter().zip(channel_upds) {
+               intermediate_nodes.push(ForwardNode {
+                       node_id: *node_id,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: chan_upd.short_channel_id,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: chan_upd.cltv_expiry_delta,
+                                       fee_proportional_millionths: chan_upd.fee_proportional_millionths,
+                                       fee_base_msat: chan_upd.fee_base_msat,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: u32::max_value(),
+                                       htlc_minimum_msat: chan_upd.htlc_minimum_msat,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: chan_upd.htlc_maximum_msat,
+               });
+       }
+       let payee_tlvs = ReceiveTlvs {
+               payment_secret,
+               payment_constraints: PaymentConstraints {
+                       max_cltv_expiry: u32::max_value(),
+                       htlc_minimum_msat: channel_upds.last().unwrap().htlc_minimum_msat,
+               },
+       };
+       let mut secp_ctx = Secp256k1::new();
+       let blinded_path = BlindedPath::new_for_payment(
+               &intermediate_nodes[..], *node_ids.last().unwrap(), payee_tlvs,
+               channel_upds.last().unwrap().htlc_maximum_msat, keys_manager, &secp_ctx
+       ).unwrap();
+
+       RouteParameters::from_payment_params_and_value(
+               PaymentParameters::blinded(vec![blinded_path]), amt_msat
+       )
+}
 
 #[test]
 fn one_hop_blinded_path() {
@@ -109,3 +158,277 @@ fn mpp_to_one_hop_blinded_path() {
                Some(payment_secret), ev.clone(), true, None);
        claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
 }
+
+enum ForwardCheckFail {
+       // Fail a check on the inbound onion payload. In this case, we underflow when calculating the
+       // outgoing cltv_expiry.
+       InboundOnionCheck,
+       // The forwarding node's payload is encoded as a receive, i.e. the next hop HMAC is [0; 32].
+       ForwardPayloadEncodedAsReceive,
+       // Fail a check on the outbound channel. In this case, our next-hop peer is offline.
+       OutboundChannelCheck,
+}
+
+#[test]
+fn forward_checks_failure() {
+       do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck);
+       do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive);
+       do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck);
+}
+
+fn do_forward_checks_failure(check: ForwardCheckFail) {
+       // Ensure we'll fail backwards properly if a forwarding check fails on initial update_add
+       // receipt.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       // We need the session priv to construct a bogus onion packet later.
+       *nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+       let amt_msat = 5000;
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       let route = get_route(&nodes[0], &route_params).unwrap();
+       node_cfgs[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       let mut update_add = &mut payment_event.msgs[0];
+       match check {
+               ForwardCheckFail::InboundOnionCheck => {
+                       update_add.cltv_expiry = 10; // causes outbound CLTV expiry to underflow
+               },
+               ForwardCheckFail::ForwardPayloadEncodedAsReceive => {
+                       let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+                       let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+                       let cur_height = nodes[0].best_block_info().1;
+                       let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
+                               &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
+                       // Remove the receive payload so the blinded forward payload is encoded as a final payload
+                       // (i.e. next_hop_hmac == [0; 32])
+                       onion_payloads.pop();
+                       update_add.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
+               },
+               ForwardCheckFail::OutboundChannelCheck => {
+                       // The intro node will see that the next-hop peer is disconnected and fail the HTLC backwards.
+                       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+               },
+       }
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true);
+
+       let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+#[test]
+fn failed_backwards_to_intro_node() {
+       // Ensure the intro node will error backwards properly even if the downstream node did not blind
+       // their error.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+       let amt_msat = 5000;
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(&nodes[1], 1);
+
+       let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       // Ensure the final node fails to handle the HTLC.
+       payment_event.msgs[0].onion_routing_packet.hop_data[0] ^= 1;
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[2], 0);
+       do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true);
+       nodes[2].node.process_pending_htlc_forwards();
+
+       let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0];
+       // Ensure the final hop does not correctly blind their error.
+       update_malformed.sha256_of_onion = [1; 32];
+       nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+       do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false);
+
+       let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+enum ProcessPendingHTLCsCheck {
+       FwdPeerDisconnected,
+       FwdChannelClosed,
+}
+
+#[test]
+fn forward_fail_in_process_pending_htlc_fwds() {
+       do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected);
+       do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed);
+}
+fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck) {
+       // Ensure the intro node will error backwards properly if the HTLC fails in
+       // process_pending_htlc_forwards.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let (chan_upd_1_2, channel_id) = {
+               let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+               (chan.0.contents, chan.2)
+       };
+
+       let amt_msat = 5000;
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
+
+       match check {
+               ProcessPendingHTLCsCheck::FwdPeerDisconnected => {
+                       // Disconnect the next-hop peer so when we go to forward in process_pending_htlc_forwards, the
+                       // intro node will error backwards.
+                       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+                       expect_pending_htlcs_forwardable!(nodes[1]);
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
+                               vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id }]);
+               },
+               ProcessPendingHTLCsCheck::FwdChannelClosed => {
+                       // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards,
+                       // the intro node will error backwards.
+                       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+                       let events = nodes[1].node.get_and_clear_pending_events();
+                       match events[0] {
+                               crate::events::Event::PendingHTLCsForwardable { .. } => {},
+                               _ => panic!("Unexpected event {:?}", events),
+                       };
+                       match events[1] {
+                               crate::events::Event::ChannelClosed { .. } => {},
+                               _ => panic!("Unexpected event {:?}", events),
+                       }
+
+                       nodes[1].node.process_pending_htlc_forwards();
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
+                               vec![HTLCDestination::UnknownNextHop { requested_forward_scid: chan_upd_1_2.short_channel_id }]);
+                       check_closed_broadcast(&nodes[1], 1, true);
+                       check_added_monitors!(nodes[1], 1);
+                       nodes[1].node.process_pending_htlc_forwards();
+               },
+       }
+
+       let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       check_added_monitors!(nodes[1], 1);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+#[test]
+fn blinded_intercept_payment() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let mut intercept_forwards_config = test_default_channel_config();
+       intercept_forwards_config.accept_intercept_htlcs = true;
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let chan_upd = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+       let amt_msat = 5000;
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let intercept_scid = nodes[1].node.get_intercept_scid();
+       let mut intercept_chan_upd = chan_upd;
+       intercept_chan_upd.short_channel_id = intercept_scid;
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&intercept_chan_upd],
+               &chanmon_cfgs[2].keys_manager);
+
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(),
+               PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+       let payment_event = {
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               SendEvent::from_event(events.remove(0))
+       };
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
+
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       let intercept_id = match events[0] {
+               crate::events::Event::HTLCIntercepted {
+                       intercept_id, payment_hash: pmt_hash,
+                       requested_next_hop_scid: short_channel_id, ..
+               } => {
+                       assert_eq!(pmt_hash, payment_hash);
+                       assert_eq!(short_channel_id, intercept_scid);
+                       intercept_id
+               },
+               _ => panic!()
+       };
+
+       nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]);
+       nodes[1].node.process_pending_htlc_forwards();
+       let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       check_added_monitors!(&nodes[1], 1);
+       assert!(update_fail.update_fail_htlcs.len() == 1);
+       let fail_msg = update_fail.update_fail_htlcs[0].clone();
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
+       commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false);
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
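Both tests above assert that the payer ultimately sees INVALID_ONION_BLINDING with 32 zero bytes of error data, i.e. a failed blinded forward reveals nothing about why it failed (the override machinery lives in the channelmanager.rs hunks below). The interception test additionally depends on HTLC interception having been enabled up front; as a minimal sketch using LDK's public `UserConfig` knob:

    // HTLC interception is off by default; a forwarding node must opt in
    // before Event::HTLCIntercepted is generated for intercept SCIDs.
    let mut config = lightning::util::config::UserConfig::default();
    config.accept_intercept_htlcs = true;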
index a9dbf5bbe51936f592055e2ba2070688a1f71827..9455a76140b5b627a21bb0a948caefa85dc3c1c7 100644 (file)
@@ -35,14 +35,14 @@ use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
 use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
 use crate::events::ClosureReason;
 use crate::routing::gossip::NodeId;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, Record, WithContext};
 use crate::util::errors::APIError;
 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
 use crate::util::scid_utils::scid_from_parts;
@@ -166,6 +166,7 @@ struct InboundHTLCOutput {
        state: InboundHTLCState,
 }
 
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum OutboundHTLCState {
        /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
        /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
@@ -199,6 +200,7 @@ enum OutboundHTLCState {
 }
 
 #[derive(Clone)]
+#[cfg_attr(test, derive(Debug, PartialEq))]
 enum OutboundHTLCOutcome {
        /// LDK version 0.0.105+ will always fill in the preimage here.
        Success(Option<PaymentPreimage>),
@@ -223,6 +225,7 @@ impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
        }
 }
 
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 struct OutboundHTLCOutput {
        htlc_id: u64,
        amount_msat: u64,
@@ -230,10 +233,12 @@ struct OutboundHTLCOutput {
        payment_hash: PaymentHash,
        state: OutboundHTLCState,
        source: HTLCSource,
+       blinding_point: Option<PublicKey>,
        skimmed_fee_msat: Option<u64>,
 }
 
 /// See AwaitingRemoteRevoke ChannelState for more info
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum HTLCUpdateAwaitingACK {
        AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
                // always outbound
@@ -244,6 +249,7 @@ enum HTLCUpdateAwaitingACK {
                onion_routing_packet: msgs::OnionPacket,
                // The extra fee we're skimming off the top of this HTLC.
                skimmed_fee_msat: Option<u64>,
+               blinding_point: Option<PublicKey>,
        },
        ClaimHTLC {
                payment_preimage: PaymentPreimage,
@@ -408,6 +414,33 @@ impl fmt::Display for ChannelError {
        }
 }
 
+pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
+       pub logger: &'a L,
+       pub peer_id: Option<PublicKey>,
+       pub channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               record.peer_id = self.peer_id;
+               record.channel_id = self.channel_id;
+               self.logger.log(record)
+       }
+}
+
+impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
+where L::Target: Logger {
+       pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
+       where S::Target: SignerProvider
+       {
+               WithChannelContext {
+                       logger,
+                       peer_id: Some(context.counterparty_node_id),
+                       channel_id: Some(context.channel_id),
+               }
+       }
+}
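`WithChannelContext` is a small logging decorator: it stamps each `Record` with the channel's counterparty `peer_id` and `channel_id` before delegating to the wrapped logger. (The `&&logger` double references at call sites below exist because callers are generic over `L: Deref` with `L::Target: Logger`, so a `&WithChannelContext` is itself a valid `L`.) A minimal self-contained sketch of the pattern, using simplified stand-in types rather than LDK's:

    // Simplified stand-in types; LDK's Record also carries module/file/line etc.
    #[derive(Clone, Copy)]
    struct Record { peer_id: Option<u8>, channel_id: Option<u8>, msg: &'static str }

    trait Logger { fn log(&self, record: Record); }

    struct StdoutLogger;
    impl Logger for StdoutLogger {
        fn log(&self, r: Record) {
            println!("peer={:?} chan={:?} {}", r.peer_id, r.channel_id, r.msg);
        }
    }

    // The decorator fills in the context fields, then delegates.
    struct WithContext<'a, L: Logger> { inner: &'a L, peer_id: Option<u8>, channel_id: Option<u8> }
    impl<'a, L: Logger> Logger for WithContext<'a, L> {
        fn log(&self, mut record: Record) {
            record.peer_id = self.peer_id;
            record.channel_id = self.channel_id;
            self.inner.log(record)
        }
    }

    fn main() {
        let base = StdoutLogger;
        let ctx = WithContext { inner: &base, peer_id: Some(7), channel_id: Some(3) };
        ctx.log(Record { peer_id: None, channel_id: None, msg: "hello" });
    }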
+
 macro_rules! secp_check {
        ($res: expr, $err: expr) => {
                match $res {
@@ -567,6 +600,8 @@ pub(crate) struct ShutdownResult {
        /// An unbroadcasted batch funding transaction id. The closure of this channel should be
        /// propagated to the remainder of the batch.
        pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
+       pub(crate) channel_id: ChannelId,
+       pub(crate) counterparty_node_id: PublicKey,
 }
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -2137,6 +2172,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        monitor_update,
                        dropped_outbound_htlcs,
                        unbroadcasted_batch_funding_txid,
+                       channel_id: self.channel_id,
+                       counterparty_node_id: self.counterparty_node_id,
                }
        }
 
@@ -2151,6 +2188,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                        .map(|(sig, _)| sig).ok()?
                        },
                        // TODO (taproot|arik)
+                       #[cfg(taproot)]
                        _ => todo!()
                };
 
@@ -2205,6 +2243,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                (counterparty_initial_commitment_tx, funding_signed)
                        },
                        // TODO (taproot|arik)
+                       #[cfg(taproot)]
                        _ => todo!()
                }
        }
@@ -2744,14 +2783,14 @@ impl<SP: Deref> Channel<SP> where
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
                                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
+               let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_bitcoin_tx.txid, Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number,
                        self.context.counterparty_cur_commitment_point.unwrap(),
                        counterparty_initial_commitment_tx.feerate_per_kw(),
                        counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
 
                assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail updates on!
                if self.context.is_batch_funding() {
@@ -3360,11 +3399,12 @@ impl<SP: Deref> Channel<SP> where
                                match &htlc_update {
                                        &HTLCUpdateAwaitingACK::AddHTLC {
                                                amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
-                                               skimmed_fee_msat, ..
+                                               skimmed_fee_msat, blinding_point, ..
                                        } => {
-                                               match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
-                                                       onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
-                                               {
+                                               match self.send_htlc(
+                                                       amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
+                                                       false, skimmed_fee_msat, blinding_point, fee_estimator, logger
+                                               ) {
                                                        Ok(_) => update_add_count += 1,
                                                        Err(e) => {
                                                                match e {
@@ -3498,6 +3538,7 @@ impl<SP: Deref> Channel<SP> where
                                ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
                        },
                        // TODO (taproot|arik)
+                       #[cfg(taproot)]
                        _ => todo!()
                };
 
@@ -4079,6 +4120,7 @@ impl<SP: Deref> Channel<SP> where
                                        cltv_expiry: htlc.cltv_expiry,
                                        onion_routing_packet: (**onion_packet).clone(),
                                        skimmed_fee_msat: htlc.skimmed_fee_msat,
+                                       blinding_point: htlc.blinding_point,
                                });
                        }
                }
@@ -4179,6 +4221,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
                }
 
+               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
                if msg.next_remote_commitment_number > 0 {
                        let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
                        let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
@@ -4186,7 +4229,7 @@ impl<SP: Deref> Channel<SP> where
                        if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
                                return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
                        }
-                       if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+                       if msg.next_remote_commitment_number > our_commitment_transaction {
                                macro_rules! log_and_panic {
                                        ($err_msg: expr) => {
                                                log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
@@ -4206,11 +4249,12 @@ impl<SP: Deref> Channel<SP> where
 
                // Before we change the state of the channel, we check if the peer is sending a very old
                // commitment transaction number, if yes we send a warning message.
-               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
-               if  msg.next_remote_commitment_number + 1 < our_commitment_transaction {
-                       return Err(
-                               ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
-                       );
+               if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+                       return Err(ChannelError::Warn(format!(
+                               "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_remote_commitment_number,
+                               our_commitment_transaction
+                       )));
                }
 
                // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
@@ -4252,11 +4296,11 @@ impl<SP: Deref> Channel<SP> where
                        });
                }
 
-               let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+               let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
                        // Remote isn't waiting on any RevokeAndACK from us!
                        // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
                        None
-               } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
+               } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
                        if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
                                self.context.monitor_pending_revoke_and_ack = true;
                                None
@@ -4264,7 +4308,12 @@ impl<SP: Deref> Channel<SP> where
                                Some(self.get_last_revoke_and_ack())
                        }
                } else {
-                       return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
+                       debug_assert!(false, "All values should have been handled in the four cases above");
+                       return Err(ChannelError::Close(format!(
+                               "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_remote_commitment_number,
+                               our_commitment_transaction
+                       )));
                };
 
                // We increment cur_counterparty_commitment_transaction_number only upon receipt of
@@ -4322,8 +4371,18 @@ impl<SP: Deref> Channel<SP> where
                                        order: self.context.resend_order.clone(),
                                })
                        }
+               } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
+                       Err(ChannelError::Close(format!(
+                               "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_local_commitment_number,
+                               next_counterparty_commitment_number,
+                       )))
                } else {
-                       Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
+                       Err(ChannelError::Close(format!(
+                               "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_local_commitment_number,
+                               next_counterparty_commitment_number,
+                       )))
                }
        }
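The reestablish refactor above collapses the commitment-number checks onto a single `our_commitment_transaction` value and turns the previously opaque mismatches into distinct, descriptive errors. A standalone sketch of the resulting case analysis (a hypothetical helper over plain integers, not LDK API; `ours` is `our_commitment_transaction`, `theirs` is `msg.next_remote_commitment_number`):

    enum ReestablishAction { LostData, UpToDate, ResendRevokeAndAck, PeerVeryOld }

    fn classify(ours: u64, theirs: u64) -> ReestablishAction {
        if theirs > ours {
            ReestablishAction::LostData           // handled by the log_and_panic! path above
        } else if theirs == ours {
            ReestablishAction::UpToDate           // required_revoke = None
        } else if theirs + 1 == ours {
            ReestablishAction::ResendRevokeAndAck // resend get_last_revoke_and_ack()
        } else {
            ReestablishAction::PeerVeryOld        // theirs + 1 < ours: ChannelError::Warn
        }
    }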
 
@@ -4453,6 +4512,7 @@ impl<SP: Deref> Channel<SP> where
                                }), None, None))
                        },
                        // TODO (taproot|arik)
+                       #[cfg(taproot)]
                        _ => todo!()
                }
        }
@@ -4657,6 +4717,8 @@ impl<SP: Deref> Channel<SP> where
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                                       channel_id: self.context.channel_id,
+                                       counterparty_node_id: self.context.counterparty_node_id,
                                };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
                                self.context.channel_state = ChannelState::ShutdownComplete as u32;
@@ -4685,6 +4747,8 @@ impl<SP: Deref> Channel<SP> where
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                                                               channel_id: self.context.channel_id,
+                                                               counterparty_node_id: self.context.counterparty_node_id,
                                                        };
                                                        self.context.channel_state = ChannelState::ShutdownComplete as u32;
                                                        self.context.update_time_counter += 1;
@@ -4706,6 +4770,7 @@ impl<SP: Deref> Channel<SP> where
                                                }), signed_tx, shutdown_result))
                                        },
                                        // TODO (taproot|arik)
+                                       #[cfg(taproot)]
                                        _ => todo!()
                                }
                        }
@@ -5339,6 +5404,7 @@ impl<SP: Deref> Channel<SP> where
                                })
                        },
                        // TODO (taproot|arik)
+                       #[cfg(taproot)]
                        _ => todo!()
                }
        }
@@ -5368,6 +5434,7 @@ impl<SP: Deref> Channel<SP> where
                                        })
                                },
                                // TODO (taproot|arik)
+                               #[cfg(taproot)]
                                _ => todo!()
                        }
                } else {
@@ -5486,13 +5553,13 @@ impl<SP: Deref> Channel<SP> where
        pub fn queue_add_htlc<F: Deref, L: Deref>(
                &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
-               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+               blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) -> Result<(), ChannelError>
        where F::Target: FeeEstimator, L::Target: Logger
        {
                self
                        .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
-                               skimmed_fee_msat, fee_estimator, logger)
+                               skimmed_fee_msat, blinding_point, fee_estimator, logger)
                        .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
                        .map_err(|err| {
                                if let ChannelError::Ignore(_) = err { /* fine */ }
@@ -5520,7 +5587,8 @@ impl<SP: Deref> Channel<SP> where
        fn send_htlc<F: Deref, L: Deref>(
                &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
-               skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+               skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
        where F::Target: FeeEstimator, L::Target: Logger
        {
@@ -5577,6 +5645,7 @@ impl<SP: Deref> Channel<SP> where
                                source,
                                onion_routing_packet,
                                skimmed_fee_msat,
+                               blinding_point,
                        });
                        return Ok(None);
                }
@@ -5588,6 +5657,7 @@ impl<SP: Deref> Channel<SP> where
                        cltv_expiry,
                        state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
                        source,
+                       blinding_point,
                        skimmed_fee_msat,
                });
 
@@ -5599,6 +5669,7 @@ impl<SP: Deref> Channel<SP> where
                        cltv_expiry,
                        onion_routing_packet,
                        skimmed_fee_msat,
+                       blinding_point,
                };
                self.context.next_holder_htlc_id += 1;
 
@@ -5747,6 +5818,7 @@ impl<SP: Deref> Channel<SP> where
                                }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
                        },
                        // TODO (taproot|arik)
+                       #[cfg(taproot)]
                        _ => todo!()
                }
        }
@@ -5764,7 +5836,7 @@ impl<SP: Deref> Channel<SP> where
        where F::Target: FeeEstimator, L::Target: Logger
        {
                let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
-                       onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
+                       onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
                if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
                match send_res? {
                        Some(_) => {
@@ -5859,6 +5931,8 @@ impl<SP: Deref> Channel<SP> where
                                monitor_update: None,
                                dropped_outbound_htlcs: Vec::new(),
                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                               channel_id: self.context.channel_id,
+                               counterparty_node_id: self.context.counterparty_node_id,
                        };
                        self.context.channel_state = ChannelState::ShutdownComplete as u32;
                        Some(shutdown_result)
@@ -6436,6 +6510,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                          F::Target: FeeEstimator,
                          L::Target: Logger,
        {
+               let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
                let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
 
                // First check the channel type is known, failing before we do anything else if we don't
@@ -6502,7 +6577,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                if msg.htlc_minimum_msat >= full_channel_value_msat {
                        return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
                }
-               Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
+               Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
 
                let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
                if msg.to_self_delay > max_counterparty_selected_contest_delay {
@@ -6921,13 +6996,13 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
                                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
+               let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number + 1,
                        self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
                        counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
 
                log_info!(logger, "{} funding_signed for peer for channel {}",
                        if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
@@ -7074,6 +7149,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 
                let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
                let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
+               let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
 
                (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
                for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
@@ -7120,15 +7196,17 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        } else if !pending_outbound_skimmed_fees.is_empty() {
                                pending_outbound_skimmed_fees.push(None);
                        }
+                       pending_outbound_blinding_points.push(htlc.blinding_point);
                }
 
                let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
+               let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
                (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
                for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
                        match update {
                                &HTLCUpdateAwaitingACK::AddHTLC {
                                        ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
-                                       skimmed_fee_msat,
+                                       blinding_point, skimmed_fee_msat,
                                } => {
                                        0u8.write(writer)?;
                                        amount_msat.write(writer)?;
@@ -7143,6 +7221,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                                                }
                                                holding_cell_skimmed_fees.push(Some(skimmed_fee));
                                        } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
+
+                                       holding_cell_blinding_points.push(blinding_point);
                                },
                                &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
                                        1u8.write(writer)?;
@@ -7312,6 +7392,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (35, pending_outbound_skimmed_fees, optional_vec),
                        (37, holding_cell_skimmed_fees, optional_vec),
                        (38, self.context.is_batch_funding, option),
+                       (39, pending_outbound_blinding_points, optional_vec),
+                       (41, holding_cell_blinding_points, optional_vec),
                });
 
                Ok(())
@@ -7423,6 +7505,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        _ => return Err(DecodeError::InvalidValue),
                                },
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        });
                }
 
@@ -7437,6 +7520,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        source: Readable::read(reader)?,
                                        onion_routing_packet: Readable::read(reader)?,
                                        skimmed_fee_msat: None,
+                                       blinding_point: None,
                                },
                                1 => HTLCUpdateAwaitingACK::ClaimHTLC {
                                        payment_preimage: Readable::read(reader)?,
@@ -7597,6 +7681,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let mut is_batch_funding: Option<()> = None;
 
+               let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+               let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -7623,6 +7710,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (35, pending_outbound_skimmed_fees_opt, optional_vec),
                        (37, holding_cell_skimmed_fees_opt, optional_vec),
                        (38, is_batch_funding, option),
+                       (39, pending_outbound_blinding_points_opt, optional_vec),
+                       (41, holding_cell_blinding_points_opt, optional_vec),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7699,6 +7788,24 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        // We expect all skimmed fees to be consumed above
                        if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
                }
+               if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
+                       let mut iter = blinding_pts.into_iter();
+                       for htlc in pending_outbound_htlcs.iter_mut() {
+                               htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+                       }
+                       // We expect all blinding points to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
+               if let Some(blinding_pts) = holding_cell_blinding_points_opt {
+                       let mut iter = blinding_pts.into_iter();
+                       for htlc in holding_cell_htlc_updates.iter_mut() {
+                               if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
+                                       *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+                               }
+                       }
+                       // We expect all blinding points to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
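TLV types 39 and 41 reuse the parallel-vector pattern already used by the skimmed-fee fields (types 35 and 37): one `Option<PublicKey>` is written per pending outbound HTLC, and one per holding-cell `AddHTLC`, in list order, then zipped back onto the HTLCs on read with any count mismatch rejected as `DecodeError::InvalidValue`. Being odd-numbered, the new TLVs are skipped rather than rejected by older deserializers. A minimal sketch of the zip-with-length-check invariant (hypothetical helper, not LDK API):

    // Zip a parallel vector of per-item values back onto the items,
    // erroring if the lengths disagree, mirroring the InvalidValue checks above.
    fn zip_parallel<T>(items: &mut [Option<T>], parallel: Vec<Option<T>>) -> Result<(), ()> {
        let mut iter = parallel.into_iter();
        for slot in items.iter_mut() {
            *slot = iter.next().ok_or(())?;
        }
        // Every serialized entry must correspond to an item.
        if iter.next().is_some() { return Err(()); }
        Ok(())
    }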
 
                Ok(Channel {
                        context: ChannelContext {
@@ -7834,13 +7941,14 @@ mod tests {
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
        use bitcoin::blockdata::opcodes;
        use bitcoin::network::constants::Network;
-       use crate::ln::PaymentHash;
+       use crate::ln::{PaymentHash, PaymentPreimage};
        use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
-use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
+       use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::ln::channel::InitFeatures;
-       use crate::ln::channel::{ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
+       use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
-       use crate::ln::features::ChannelTypeFeatures;
+       use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
+       use crate::ln::msgs;
        use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use crate::ln::script::ShutdownScript;
        use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
@@ -7848,9 +7956,10 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
        use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
        use crate::chain::transaction::OutPoint;
-       use crate::routing::router::Path;
+       use crate::routing::router::{Path, RouteHop};
        use crate::util::config::UserConfig;
        use crate::util::errors::APIError;
+       use crate::util::ser::{ReadableArgs, Writeable};
        use crate::util::test_utils;
        use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
        use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
@@ -8038,6 +8147,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                payment_id: PaymentId([42; 32]),
                        },
                        skimmed_fee_msat: None,
+                       blinding_point: None,
                });
 
                // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
@@ -8363,6 +8473,96 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                assert!(!node_a_chan.channel_update(&update).unwrap());
        }
 
+       #[test]
+       fn blinding_point_ser() {
+               // Ensure that channel blinding points are (de)serialized properly.
+               let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let config = UserConfig::default();
+               let features = channelmanager::provided_init_features(&config);
+               let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+               let mut chan = Channel { context: outbound_chan.context };
+
+               let dummy_htlc_source = HTLCSource::OutboundRoute {
+                       path: Path {
+                               hops: vec![RouteHop {
+                                       pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
+                                       node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
+                                       cltv_expiry_delta: 0, maybe_announced_channel: false,
+                               }],
+                               blinded_tail: None
+                       },
+                       session_priv: test_utils::privkey(42),
+                       first_hop_htlc_msat: 0,
+                       payment_id: PaymentId([42; 32]),
+               };
+               let dummy_outbound_output = OutboundHTLCOutput {
+                       htlc_id: 0,
+                       amount_msat: 0,
+                       payment_hash: PaymentHash([43; 32]),
+                       cltv_expiry: 0,
+                       state: OutboundHTLCState::Committed,
+                       source: dummy_htlc_source.clone(),
+                       skimmed_fee_msat: None,
+                       blinding_point: None,
+               };
+               let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
+               for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
+                       if idx % 2 == 0 {
+                               htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
+                       }
+               }
+               chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
+
+               let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
+                       amount_msat: 0,
+                       cltv_expiry: 0,
+                       payment_hash: PaymentHash([43; 32]),
+                       source: dummy_htlc_source.clone(),
+                       onion_routing_packet: msgs::OnionPacket {
+                               version: 0,
+                               public_key: Ok(test_utils::pubkey(1)),
+                               hop_data: [0; 20*65],
+                               hmac: [0; 32]
+                       },
+                       skimmed_fee_msat: None,
+                       blinding_point: None,
+               };
+               let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
+                       payment_preimage: PaymentPreimage([42; 32]),
+                       htlc_id: 0,
+               };
+               let mut holding_cell_htlc_updates = Vec::with_capacity(10);
+               for i in 0..10 {
+                       if i % 3 == 0 {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
+                       } else if i % 3 == 1 {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
+                       } else {
+                               let mut dummy_add = dummy_holding_cell_add_htlc.clone();
+                               if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = &mut dummy_add {
+                                       *blinding_point = Some(test_utils::pubkey(42 + i));
+                               } else { panic!() }
+                               holding_cell_htlc_updates.push(dummy_add);
+                       }
+               }
+               chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
+
+               // Encode and decode the channel and ensure that the HTLCs within are the same.
+               let encoded_chan = chan.encode();
+               let mut s = crate::io::Cursor::new(&encoded_chan);
+               let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
+               let features = channelmanager::provided_channel_type_features(&config);
+               let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
+               assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
+               assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
+       }
+
        #[cfg(feature = "_test_vectors")]
        #[test]
        fn outbound_commitment_test() {
@@ -8612,6 +8812,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
                        out
@@ -8625,6 +8826,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
                        out
@@ -9036,6 +9238,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
                        out
@@ -9049,6 +9252,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
                        out
@@ -9115,7 +9319,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
                           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
        }
-       
+
        #[test]
        fn test_key_derivation() {
                // Test vectors from BOLT 3 Appendix E:
index a7e428c20b0eff3dd58fdbf74c6c7a3a9c4359de..a3842285263f604bbda4b46d599db205bcc0cfbb 100644 (file)
@@ -36,23 +36,24 @@ use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::events;
 use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
 use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
 use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
-use crate::ln::onion_utils::HTLCFailReason;
+use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 #[cfg(test)]
 use crate::ln::outbound_payment;
@@ -72,7 +73,7 @@ use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::string::UntrustedString;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
-use crate::util::logger::{Level, Logger};
+use crate::util::logger::{Level, Logger, WithContext};
 use crate::util::errors::APIError;
 
 use alloc::collections::{btree_map, BTreeMap};
@@ -118,16 +119,23 @@ pub enum PendingHTLCRouting {
                /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
                /// generated using `get_fake_scid` from the scid_utils::fake_scid module.
                short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
+               /// Set if this HTLC is being forwarded within a blinded path.
+               blinded: Option<BlindedForward>,
        },
-       /// An HTLC paid to an invoice we generated.
+       /// An HTLC paid to an invoice (supposedly) generated by us.
+       /// At this point, we have not checked that the invoice being paid was actually generated by
+       /// us; the HTLC merely claims to pay an invoice of ours.
        Receive {
                /// Payment secret and total msat received.
                payment_data: msgs::FinalOnionHopData,
                /// See [`RecipientOnionFields::payment_metadata`] for more info.
                payment_metadata: Option<Vec<u8>>,
+               /// CLTV expiry of the received HTLC.
                /// Used to track when we should expire pending HTLCs that go unclaimed.
                incoming_cltv_expiry: u32,
-               /// Optional shared secret for phantom node.
+               /// Shared secret derived using a phantom node secret key. If this field is Some, the
+               /// payment was sent to a phantom node (one hop beyond the current node), but can be
+               /// settled by this node.
                phantom_shared_secret: Option<[u8; 32]>,
                /// See [`RecipientOnionFields::custom_tlvs`] for more info.
                custom_tlvs: Vec<(u64, Vec<u8>)>,
@@ -136,31 +144,57 @@ pub enum PendingHTLCRouting {
        ReceiveKeysend {
                /// This was added in 0.0.116 and will break deserialization on downgrades.
                payment_data: Option<msgs::FinalOnionHopData>,
-               /// Preimage for this onion payment.
+               /// Preimage for this onion payment. This preimage is provided by the sender and will be
+               /// used to settle the spontaneous payment.
                payment_preimage: PaymentPreimage,
                /// See [`RecipientOnionFields::payment_metadata`] for more info.
                payment_metadata: Option<Vec<u8>>,
-               /// CLTV expiry of the incoming HTLC.
-               incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+               /// CLTV expiry of the received HTLC.
+               /// Used to track when we should expire pending HTLCs that go unclaimed.
+               incoming_cltv_expiry: u32,
                /// See [`RecipientOnionFields::custom_tlvs`] for more info.
                custom_tlvs: Vec<(u64, Vec<u8>)>,
        },
 }
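For `ReceiveKeysend` above, the sender chooses the preimage, which is why the recipient can settle without ever having issued an invoice: it only needs to verify that the HTLC's payment hash commits to the preimage carried in the onion. A one-function sketch of that protocol-level check (not an LDK helper):

    use bitcoin::hashes::{sha256, Hash};

    // A keysend HTLC is settleable iff payment_hash == SHA256(payment_preimage).
    fn keysend_preimage_matches(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
        sha256::Hash::hash(preimage).to_byte_array() == *payment_hash
    }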
 
+/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
+#[derive(Clone, Copy, Hash, PartialEq, Eq)]
+pub struct BlindedForward {
+       /// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
+       /// onion payload if we're the introduction node. Useful for calculating the next hop's
+       /// [`msgs::UpdateAddHTLC::blinding_point`].
+       pub inbound_blinding_point: PublicKey,
+       // Another field will be added here when we support forwarding as a non-intro node.
+}
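The stored `inbound_blinding_point` is what lets the intro node derive the `blinding_point` for the outbound `update_add_htlc`. Under the BOLT 4 route-blinding scheme, each hop tweaks the point forward as E(i+1) = SHA256(E(i) || ss(i)) * E(i), where ss(i) is the SHA256'd ECDH secret between the node key and E(i). A sketch of that derivation using rust-secp256k1 primitives (LDK's internal helper may be structured differently):

    use bitcoin::hashes::{sha256, Hash, HashEngine};
    use bitcoin::secp256k1::{ecdh::SharedSecret, All, PublicKey, Scalar, Secp256k1, SecretKey};

    fn next_blinding_point(
        secp: &Secp256k1<All>, inbound_blinding_point: &PublicKey, node_privkey: &SecretKey,
    ) -> PublicKey {
        // ss(i) = SHA256(ECDH(node_key, E(i))); SharedSecret::new hashes internally.
        let ss = SharedSecret::new(inbound_blinding_point, node_privkey);
        // E(i+1) = SHA256(E(i) || ss(i)) * E(i)
        let mut eng = sha256::Hash::engine();
        eng.input(&inbound_blinding_point.serialize());
        eng.input(&ss.secret_bytes());
        let tweak = Scalar::from_be_bytes(sha256::Hash::from_engine(eng).to_byte_array())
            .expect("hash output is a valid scalar with overwhelming probability");
        inbound_blinding_point.mul_tweak(secp, &tweak).expect("tweaked point is valid")
    }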
+
+impl PendingHTLCRouting {
+       // Used to override the onion failure code and data if the HTLC is blinded.
+       fn blinded_failure(&self) -> Option<BlindedFailure> {
+               // TODO: needs update when we support receiving to multi-hop blinded paths
+               if let Self::Forward { blinded: Some(_), .. } = self {
+                       Some(BlindedFailure::FromIntroductionNode)
+               } else {
+                       None
+               }
+       }
+}
+
 /// Full details of an incoming HTLC, including routing info.
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub struct PendingHTLCInfo {
        /// Further routing details based on whether the HTLC is being forwarded or received.
        pub routing: PendingHTLCRouting,
        /// Shared secret from the previous hop.
+       /// Used to encrypt failure packets in the event that the HTLC needs to be failed backwards.
        pub incoming_shared_secret: [u8; 32],
-       payment_hash: PaymentHash,
-       /// Amount received
+       /// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
+       pub payment_hash: PaymentHash,
+       /// Amount offered by this HTLC.
        pub incoming_amt_msat: Option<u64>, // Added in 0.0.113
        /// Sender intended amount to forward or receive (actual amount received
        /// may overshoot this in either case)
        pub outgoing_amt_msat: u64,
-       /// Outgoing CLTV height.
+       /// Outgoing timelock expiration blockheight.
        pub outgoing_cltv_value: u32,
        /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
        /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
@@ -203,6 +237,13 @@ pub(super) enum HTLCForwardInfo {
        },
 }
 
+// Used for failing blinded HTLCs backwards correctly.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+enum BlindedFailure {
+       FromIntroductionNode,
+       // Another variant will be added here for non-intro nodes.
+}
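`BlindedFailure` drives the error override the tests asserted earlier: a node inside a blinded path must not reveal the real reason a forward failed, so the intro node replaces any downstream error with `invalid_onion_blinding` (per BOLT 4: BADONION | PERM | 24) and 32 zero bytes of data. A sketch of that mapping (the actual override site in channelmanager.rs falls outside this diff's hunks; `INVALID_ONION_BLINDING` itself is imported from onion_utils earlier in this file's hunks):

    // Stand-in mirroring the enum above.
    enum BlindedFailure { FromIntroductionNode }

    const BADONION: u16 = 0x8000;
    const PERM: u16 = 0x4000;
    const INVALID_ONION_BLINDING: u16 = BADONION | PERM | 24;

    fn failure_code_and_data(
        blinded_failure: Option<BlindedFailure>, real_code: u16, real_data: Vec<u8>,
    ) -> (u16, Vec<u8>) {
        match blinded_failure {
            // The intro node converts every downstream failure into an opaque one.
            Some(BlindedFailure::FromIntroductionNode) => (INVALID_ONION_BLINDING, vec![0; 32]),
            None => (real_code, real_data),
        }
    }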
+
 /// Tracks the inbound corresponding to an outbound HTLC
 #[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
@@ -212,6 +253,7 @@ pub(crate) struct HTLCPreviousHopData {
        htlc_id: u64,
        incoming_packet_shared_secret: [u8; 32],
        phantom_shared_secret: Option<[u8; 32]>,
+       blinded_failure: Option<BlindedFailure>,
 
        // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
        // channel with a preimage provided by the forward channel.
@@ -397,16 +439,6 @@ impl HTLCSource {
        }
 }
 
-/// Invalid inbound onion payment.
-pub struct InboundOnionErr {
-       /// BOLT 4 error code.
-       pub err_code: u16,
-       /// Data attached to this error.
-       pub err_data: Vec<u8>,
-       /// Error message text.
-       pub msg: &'static str,
-}
-
 /// This enum is used to specify which error data to send to peers when failing back an HTLC
 /// using [`ChannelManager::fail_htlc_backwards_with_reason`].
 ///
@@ -1885,7 +1917,10 @@ macro_rules! handle_error {
                                        }
                                }
 
-                               log_error!($self.logger, "{}", err.err);
+                               let logger = WithContext::from(
+                                       &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
+                               );
+                               log_error!(logger, "{}", err.err);
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
                                        msg_events.push(events::MessageSendEvent::HandleError {
@@ -1907,15 +1942,6 @@ macro_rules! handle_error {
                        },
                }
        } };
-       ($self: ident, $internal: expr) => {
-               match $internal {
-                       Ok(res) => Ok(res),
-                       Err((chan, msg_handle_err)) => {
-                               let counterparty_node_id = chan.get_counterparty_node_id();
-                               handle_error!($self, Err(msg_handle_err), counterparty_node_id).map_err(|err| (chan, err))
-                       },
-               }
-       };
 }
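Most of the logging changes in this commit follow a single pattern: wrap the node-wide logger in a context decorator that stamps the relevant peer and channel onto every record. A minimal sketch of what such a decorator plausibly looks like, assuming `Record` carries optional `peer_id`/`channel_id` fields (the real type lives in `lightning/src/util/logger.rs`):

    use core::ops::Deref;
    use bitcoin::secp256k1::PublicKey;
    // `ChannelId`, `Logger`, and `Record` as defined in this crate.

    pub struct WithContext<'a, L: Deref> where L::Target: Logger {
        logger: &'a L,
        peer_id: Option<PublicKey>,
        channel_id: Option<ChannelId>,
    }

    impl<'a, L: Deref> WithContext<'a, L> where L::Target: Logger {
        pub fn from(logger: &'a L, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>) -> Self {
            Self { logger, peer_id, channel_id }
        }
    }

    impl<'a, L: Deref> Logger for WithContext<'a, L> where L::Target: Logger {
        fn log(&self, mut record: Record) {
            // Stamp the context onto the record, then delegate to the inner logger.
            record.peer_id = self.peer_id;
            record.channel_id = self.channel_id.clone();
            self.logger.log(record)
        }
    }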
 
 macro_rules! update_maps_on_chan_removal {
@@ -1949,7 +1975,8 @@ macro_rules! convert_chan_phase_err {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
                        },
                        ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
+                               let logger = WithChannelContext::from(&$self.logger, &$channel.context);
+                               log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
                                update_maps_on_chan_removal!($self, $channel.context);
                                let shutdown_res = $channel.context.force_shutdown(true);
                                let user_id = $channel.context.get_user_id();
@@ -2075,7 +2102,8 @@ macro_rules! emit_channel_ready_event {
 
 macro_rules! handle_monitor_update_completion {
        ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
-               let mut updates = $chan.monitor_updating_restored(&$self.logger,
+               let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+               let mut updates = $chan.monitor_updating_restored(&&logger,
                        &$self.node_signer, $self.chain_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
                let counterparty_node_id = $chan.context.get_counterparty_node_id();
@@ -2170,14 +2198,15 @@ macro_rules! handle_monitor_update_completion {
 macro_rules! handle_new_monitor_update {
        ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
                debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
+               let logger = WithChannelContext::from(&$self.logger, &$chan.context);
                match $update_res {
                        ChannelMonitorUpdateStatus::UnrecoverableError => {
                                let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-                               log_error!($self.logger, "{}", err_str);
+                               log_error!(logger, "{}", err_str);
                                panic!("{}", err_str);
                        },
                        ChannelMonitorUpdateStatus::InProgress => {
-                               log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+                               log_debug!(logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
                                        &$chan.context.channel_id());
                                false
                        },
@@ -2779,7 +2808,10 @@ where
                        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
                }
 
-               log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+               let logger = WithContext::from(
+                       &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
+               );
+               log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
                for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
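The numeric failure codes used here compose BOLT 4 flag bits with a failure type number; for reference (flag values per the spec, constant names illustrative):

    const BADONION: u16 = 0x8000; // unparsable onion
    const PERM: u16 = 0x4000;     // permanent failure
    const NODE: u16 = 0x2000;     // node-level failure
    const UPDATE: u16 = 0x1000;   // a channel_update is enclosed
    const PERMANENT_CHANNEL_FAILURE: u16 = PERM | 8;  // used above for dropped HTLCs
    const UNKNOWN_NEXT_PEER: u16 = PERM | 10;         // used later for unforwardable HTLCs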
@@ -2834,8 +2866,9 @@ where
                        } else {
                                ClosureReason::HolderForceClosed
                        };
+                       let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
                        if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", channel_id);
+                               log_error!(logger, "Force-closing channel {}", channel_id);
                                self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
                                mem::drop(peer_state);
@@ -2852,7 +2885,7 @@ where
                                        },
                                }
                        } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
-                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
+                               log_error!(logger, "Force-closing channel {}", &channel_id);
                                // N.B. that we don't send any channel close event here: we
                                // don't have a user_channel_id, and we never sent any opening
                                // events anyway.
@@ -2937,7 +2970,7 @@ where
        }
 
        fn decode_update_add_htlc_onion(
-               &self, msg: &msgs::UpdateAddHTLC
+               &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
        ) -> Result<
                (onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
        > {
@@ -2945,14 +2978,27 @@ where
                        msg, &self.node_signer, &self.logger, &self.secp_ctx
                )?;
 
+               let is_blinded = match next_hop {
+                       onion_utils::Hop::Forward {
+                               next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+                       } => true,
+                       _ => false, // TODO: update this when we support receiving to multi-hop blinded paths
+               };
+
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
-                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                       log_info!(
+                                               WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
+                                               "Failed to accept/forward incoming HTLC: {}", $msg
+                                       );
+                                       let (err_code, err_data) = if is_blinded {
+                                               (INVALID_ONION_BLINDING, &[0; 32][..])
+                                       } else { ($err_code, $data) };
                                        return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
-                                               reason: HTLCFailReason::reason($err_code, $data.to_vec())
+                                               reason: HTLCFailReason::reason(err_code, err_data.to_vec())
                                                        .get_encrypted_failure_packet(&shared_secret, &None),
                                        }));
                                }
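Written out as a plain function, the conversion the macro performs for blinded HTLCs is the following (sketch; `INVALID_ONION_BLINDING` as imported in this file):

    fn maybe_mask_for_blinded_path(is_blinded: bool, err_code: u16, err_data: &[u8]) -> (u16, Vec<u8>) {
        if is_blinded {
            // Route blinding: never reveal the real failure to the upstream peer.
            (INVALID_ONION_BLINDING, vec![0; 32])
        } else {
            (err_code, err_data.to_vec())
        }
    }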
@@ -3090,13 +3136,15 @@ where
        }
 
        fn construct_pending_htlc_status<'a>(
-               &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
-               allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+               &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
+               decoded_hop: onion_utils::Hop, allow_underpay: bool,
+               next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
        ) -> PendingHTLCStatus {
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
-                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+                                       log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
                                        return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
@@ -3154,7 +3202,8 @@ where
                if chan.context.get_short_channel_id().is_none() {
                        return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
                }
-               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
                self.get_channel_update_for_unicast(chan)
        }
 
@@ -3170,7 +3219,8 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Attempting to generate channel update for channel {}", &chan.context.channel_id());
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               log_trace!(logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id().0));
                let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -3180,7 +3230,8 @@ where
        }
 
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Generating channel update for channel {}", &chan.context.channel_id());
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               log_trace!(logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id().0));
                let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
                let enabled = chan.context.is_usable() && match chan.channel_update_status() {
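The `were_node_one` comparison implements BOLT 7's endpoint ordering: node_1 is the lexicographically lesser node_id, and the low bit of `channel_flags` in `channel_update` encodes which endpoint is speaking. A sketch of the flag assembly (bit layout per BOLT 7):

    fn channel_flags(were_node_one: bool, enabled: bool) -> u8 {
        let direction = if were_node_one { 0 } else { 1 }; // bit 0: direction
        let disable = if enabled { 0 } else { 2 };         // bit 1: channel disabled
        direction | disable
    }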
@@ -3230,24 +3281,33 @@ where
                } = args;
                // The top-level caller should hold the total_consistency_lock read lock.
                debug_assert!(self.total_consistency_lock.try_write().is_err());
-
-               log_trace!(self.logger,
-                       "Attempting to send payment with payment hash {} along path with next hop {}",
-                       payment_hash, path.hops.first().unwrap().short_channel_id);
                let prng_seed = self.entropy_source.get_secure_random_bytes();
                let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
 
                let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
                        &self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
                        payment_hash, keysend_preimage, prng_seed
-               )?;
+               ).map_err(|e| {
+                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                       log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
+                       e
+               })?;
 
                let err: Result<(), _> = loop {
                        let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
-                               None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
+                               None => {
+                                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                                       log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
+                                       return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
+                               },
                                Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        };
 
+                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+                       log_trace!(logger,
+                               "Attempting to send payment with payment hash {} along path with next hop {}",
+                               payment_hash, path.hops.first().unwrap().short_channel_id);
+
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
                                .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
@@ -3260,13 +3320,14 @@ where
                                                        return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
                                                }
                                                let funding_txo = chan.context.get_funding_txo().unwrap();
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
                                                        htlc_cltv, HTLCSource::OutboundRoute {
                                                                path: path.clone(),
                                                                session_priv: session_priv.clone(),
                                                                first_hop_htlc_msat: htlc_msat,
                                                                payment_id,
-                                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
+                                                       }, onion_packet, None, &self.fee_estimator, &&logger);
                                                match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
                                                        Some(monitor_update) => {
                                                                match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
@@ -3296,7 +3357,6 @@ where
                        }
                        return Ok(());
                };
-
                match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
                        Ok(_) => unreachable!(),
                        Err(e) => {
@@ -3622,7 +3682,8 @@ where
                        Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
-                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger)
+                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
                                                let user_id = chan.context.get_user_id();
@@ -3635,7 +3696,6 @@ where
                                        Err((chan, err)) => {
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
-
                                                let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
                                                return Err(APIError::ChannelUnavailable {
                                                        err: "Signer refused to sign the initial commitment transaction".to_owned()
@@ -3999,7 +4059,8 @@ where
                                None => {
                                        let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
                                                next_hop_channel_id, next_node_id);
-                                       log_error!(self.logger, "{} when attempting to forward intercepted HTLC", error);
+                                       let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+                                       log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
                                        return Err(APIError::ChannelUnavailable {
                                                err: error
                                        })
@@ -4013,8 +4074,10 @@ where
                        })?;
 
                let routing = match payment.forward_info.routing {
-                       PendingHTLCRouting::Forward { onion_packet, .. } => {
-                               PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid }
+                       PendingHTLCRouting::Forward { onion_packet, blinded, .. } => {
+                               PendingHTLCRouting::Forward {
+                                       onion_packet, blinded, short_channel_id: next_hop_scid
+                               }
                        },
                        _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
                };
@@ -4058,6 +4121,7 @@ where
                                htlc_id: payment.prev_htlc_id,
                                incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
                                phantom_shared_secret: None,
+                               blinded_failure: payment.forward_info.routing.blinded_failure(),
                        });
 
                        let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
@@ -4084,6 +4148,7 @@ where
 
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
                                if short_chan_id != 0 {
+                                       let mut forwarding_counterparty = None;
                                        macro_rules! forwarding_channel_not_found {
                                                () => {
                                                        for forward_info in pending_forwards.drain(..) {
@@ -4097,7 +4162,8 @@ where
                                                                        }) => {
                                                                                macro_rules! failure_handler {
                                                                                        ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
-                                                                                               log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+                                                                                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
 
                                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                        short_channel_id: prev_short_channel_id,
@@ -4106,6 +4172,7 @@ where
                                                                                                        htlc_id: prev_htlc_id,
                                                                                                        incoming_packet_shared_secret: incoming_shared_secret,
                                                                                                        phantom_shared_secret: $phantom_ss,
+                                                                                                       blinded_failure: routing.blinded_failure(),
                                                                                                });
 
                                                                                                let reason = if $next_hop_unknown {
@@ -4135,7 +4202,7 @@ where
                                                                                                }
                                                                                        }
                                                                                }
-                                                                               if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
+                                                                               if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
                                                                                        let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
                                                                                        if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
                                                                                                let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
@@ -4195,6 +4262,7 @@ where
                                                        continue;
                                                }
                                        };
+                                       forwarding_counterparty = Some(counterparty_node_id);
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                        let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
                                        if peer_state_mutex_opt.is_none() {
@@ -4204,16 +4272,19 @@ where
                                        let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                        let peer_state = &mut *peer_state_lock;
                                        if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                for forward_info in pending_forwards.drain(..) {
                                                        match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                        prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                        forward_info: PendingHTLCInfo {
                                                                                incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
-                                                                               routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
+                                                                               routing: PendingHTLCRouting::Forward {
+                                                                                       onion_packet, blinded, ..
+                                                                               }, skimmed_fee_msat, ..
                                                                        },
                                                                }) => {
-                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
+                                                                       log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
@@ -4222,14 +4293,23 @@ where
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                // Phantom payments are only PendingHTLCRouting::Receive.
                                                                                phantom_shared_secret: None,
+                                                                               blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+                                                                       });
+                                                                       let next_blinding_point = blinded.and_then(|b| {
+                                                                               let encrypted_tlvs_ss = self.node_signer.ecdh(
+                                                                                       Recipient::Node, &b.inbound_blinding_point, None
+                                                                               ).unwrap().secret_bytes();
+                                                                               onion_utils::next_hop_pubkey(
+                                                                                       &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+                                                                               ).ok()
                                                                        });
                                                                        if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
                                                                                payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                               onion_packet, skimmed_fee_msat, &self.fee_estimator,
-                                                                               &self.logger)
+                                                                               onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+                                                                               &&logger)
                                                                        {
                                                                                if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+                                                                                       log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
                                                                                } else {
                                                                                        panic!("Stated return value requirements in send_htlc() were not met");
                                                                                }
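The `next_blinding_point` computation above advances the route-blinding point for the next hop, as BOLT 4 specifies: E_{i+1} = SHA256(E_i || ss_i) * E_i, where ss_i is our ECDH output with the current blinding point. A self-contained sketch of what `onion_utils::next_hop_pubkey` presumably computes, using the `secp256k1` and `sha2` crates for illustration:

    use secp256k1::{PublicKey, Scalar, Secp256k1, Verification};
    use sha2::{Digest, Sha256};

    fn next_blinding_point<C: Verification>(
        secp: &Secp256k1<C>, e_i: PublicKey, ss_i: &[u8; 32],
    ) -> Result<PublicKey, secp256k1::Error> {
        // blinding_factor = SHA256(E_i || ss_i)
        let mut hasher = Sha256::new();
        hasher.update(&e_i.serialize());
        hasher.update(ss_i);
        let blinding_factor: [u8; 32] = hasher.finalize().into();
        // E_{i+1} = blinding_factor * E_i
        let tweak = Scalar::from_be_bytes(blinding_factor)
            .map_err(|_| secp256k1::Error::InvalidTweak)?;
        e_i.mul_tweak(secp, &tweak)
    }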
@@ -4245,12 +4325,12 @@ where
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
                                                                        if let Err(e) = chan.queue_fail_htlc(
-                                                                               htlc_id, err_packet, &self.logger
+                                                                               htlc_id, err_packet, &&logger
                                                                        ) {
                                                                                if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                                       log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
                                                                                } else {
                                                                                        panic!("Stated return value requirements in queue_fail_htlc() were not met");
                                                                                }
@@ -4276,6 +4356,7 @@ where
                                                                        skimmed_fee_msat, ..
                                                                }
                                                        }) => {
+                                                               let blinded_failure = routing.blinded_failure();
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
                                                                        PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => {
                                                                                let _legacy_hop_data = Some(payment_data.clone());
@@ -4305,6 +4386,7 @@ where
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                phantom_shared_secret,
+                                                                               blinded_failure,
                                                                        },
                                                                        // We differentiate the received value from the sender intended value
                                                                        // if possible so that we don't prematurely mark MPP payments complete
@@ -4335,6 +4417,7 @@ where
                                                                                                htlc_id: $htlc.prev_hop.htlc_id,
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                                                phantom_shared_secret,
+                                                                                               blinded_failure: None,
                                                                                        }), payment_hash,
                                                                                        HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
                                                                                        HTLCDestination::FailedPayment { payment_hash: $payment_hash },
@@ -4615,23 +4698,26 @@ where
 
        fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
                if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
+
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
                        if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
-                               log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+                               log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
                                chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        }
                        return NotifyOption::SkipPersistNoEvents;
                }
                if !chan.context.is_live() {
-                       log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+                       log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
                                chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersistNoEvents;
                }
-               log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+               log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
                        &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 
-               chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
+               chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
                NotifyOption::DoPersist
        }
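The skip condition in `update_channel_fee` reads tersely; isolated, it sends an update only when the feerate rose, or fell to half (or less) of the current value:

    fn should_skip_fee_update(current: u32, new: u32) -> bool {
        new <= current && new * 2 > current
    }

    // should_skip_fee_update(1000, 600)  == true   (40% drop: not worth an update_fee)
    // should_skip_fee_update(1000, 500)  == false  (halved: queue update_fee)
    // should_skip_fee_update(1000, 1200) == false  (any increase: queue update_fee)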
 
@@ -4710,7 +4796,8 @@ where
                        | {
                                context.maybe_expire_prev_config();
                                if unfunded_context.should_expire_unfunded_channel() {
-                                       log_error!(self.logger,
+                                       let logger = WithChannelContext::from(&self.logger, context);
+                                       log_error!(logger,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
                                        self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
@@ -4795,7 +4882,8 @@ where
                                                                chan.context.maybe_expire_prev_config();
 
                                                                if chan.should_disconnect_peer_awaiting_response() {
-                                                                       log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+                                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                                       log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
                                                                                        counterparty_node_id, chan_id);
                                                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                                                node_id: counterparty_node_id,
@@ -4823,7 +4911,8 @@ where
 
                                        for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
                                                if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
-                                                       log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
+                                                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id));
+                                                       log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
                                                        peer_state.pending_msg_events.push(
                                                                events::MessageSendEvent::HandleError {
                                                                        node_id: counterparty_node_id,
@@ -5098,9 +5187,26 @@ where
                                        &self.pending_events, &self.logger)
                                { self.push_pending_forwards_ev(); }
                        },
-                       HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint, .. }) => {
-                               log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", &payment_hash, onion_error);
-                               let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
+                       HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                               ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
+                               ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+                       }) => {
+                               log_trace!(
+                                       WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+                                       "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
+                                       if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
+                               );
+                               let err_packet = match blinded_failure {
+                                       Some(BlindedFailure::FromIntroductionNode) => {
+                                               let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
+                                               blinded_onion_error.get_encrypted_failure_packet(
+                                                       incoming_packet_shared_secret, phantom_shared_secret
+                                               )
+                                       },
+                                       None => {
+                                               onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret)
+                                       }
+                               };
 
                                let mut push_forward_ev = false;
                                let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
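Mirroring the match above: an introduction node discards whatever error actually occurred and substitutes the spec-mandated one before encrypting backwards with the inbound hop's shared secret (sketch, reusing the same helpers this diff calls):

    fn blinded_err_packet(incoming_ss: &[u8; 32]) -> msgs::OnionErrorPacket {
        HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
            .get_encrypted_failure_packet(incoming_ss, &None)
    }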
@@ -5260,6 +5366,7 @@ where
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
+                               let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
                                if let Err((pk, err)) = self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
@@ -5270,6 +5377,7 @@ where
                                        if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                // We got a temporary failure updating monitor, but will claim the
                                                // HTLC when the monitor updating is restored (or on chain).
+                               let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
-                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+                               log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                        } else { errs.push((pk, err)); }
                                }
@@ -5328,12 +5436,13 @@ where
                                if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
-                                               let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
 
                                                match fulfill_res {
                                                        UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
                                                                if let Some(action) = completion_action(Some(htlc_value_msat), false) {
-                                                                       log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                                                                       log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
                                                                                chan_id, action);
                                                                        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                                }
@@ -5360,7 +5469,7 @@ where
                                                                };
                                                                mem::drop(peer_state_lock);
 
-                                                               log_trace!(self.logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
+                                                               log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
                                                                        chan_id, action);
                                                                let (node_id, funding_outpoint, blocker) =
                                                                if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
@@ -5417,7 +5526,7 @@ where
                                // with a preimage we *must* somehow manage to propagate it to the upstream
                                // channel, or we must have an ability to receive the same event and try
                                // again on restart.
-                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
                                        payment_preimage, update_res);
                        }
                } else {
@@ -5629,7 +5738,8 @@ where
                pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
        -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
-               log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+               let logger = WithChannelContext::from(&self.logger, &channel.context);
+               log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
                        &channel.context.channel_id(),
                        if raa.is_some() { "an" } else { "no" },
                        if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
@@ -5683,7 +5793,7 @@ where
                }
 
                if let Some(tx) = funding_broadcastable {
-                       log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+                       log_info!(logger, "Broadcasting funding transaction with txid {}", tx.txid());
                        self.tx_broadcaster.broadcast_transactions(&[&tx]);
                }
 
@@ -5733,7 +5843,8 @@ where
                                pending.retain(|upd| upd.update_id > highest_applied_update_id);
                                pending.len()
                        } else { 0 };
-               log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
+               let logger = WithChannelContext::from(&self.logger, &channel.context);
+               log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
                        highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
                        remaining_in_flight);
                if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
@@ -6059,7 +6170,8 @@ where
                let (chan, funding_msg_opt, monitor) =
                        match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
                                Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
-                                       match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
+                                       let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
+                                       match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
                                                Ok(res) => res,
                                                Err((mut inbound_chan, err)) => {
                                                        // We've already removed this inbound channel from the map in `PeerState`
@@ -6119,7 +6231,8 @@ where
                                                        }
                                                        Ok(())
                                                } else {
-                                                       log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
                                                        let channel_id = match funding_msg_opt {
                                                                Some(msg) => msg.channel_id,
                                                                None => chan.context.channel_id(),
@@ -6149,8 +6262,9 @@ where
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                match chan_phase_entry.get_mut() {
                                        ChannelPhase::Funded(ref mut chan) => {
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                let monitor = try_chan_phase_entry!(self,
-                                                       chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
+                                                       chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger), chan_phase_entry);
                                                if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
                                                        handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
                                                        Ok(())
@@ -6181,10 +6295,11 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
-                                               self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+                                               self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
                                        if let Some(announcement_sigs) = announcement_sigs_opt {
-                                               log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
+                                               log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                        node_id: counterparty_node_id.clone(),
                                                        msg: announcement_sigs,
@@ -6195,7 +6310,7 @@ where
                                                // counterparty's announcement_signatures. Thus, we only bother to send a
                                                // channel_update here if the channel is not public, i.e. we're not sending an
                                                // announcement_signatures.
-                                               log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
+                                               log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
                                                if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
                                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                node_id: counterparty_node_id.clone(),
@@ -6238,7 +6353,8 @@ where
                                match phase {
                                        ChannelPhase::Funded(chan) => {
                                                if !chan.received_shutdown() {
-                                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
                                                                msg.channel_id,
                                                                if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
                                                }
@@ -6265,7 +6381,8 @@ where
                                        },
                                        ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
                                                let context = phase.context_mut();
-                                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+                                               let logger = WithChannelContext::from(&self.logger, context);
+                                               log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
                                                self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
                                                finish_shutdown = Some(chan.context_mut().force_shutdown(false));
@@ -6325,7 +6442,8 @@ where
                        }
                };
                if let Some(broadcast_tx) = tx {
-                       log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
+                       let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
+                       log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id), "Broadcasting {}", log_tx!(broadcast_tx));
                        self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
                }
                if let Some(ChannelPhase::Funded(chan)) = chan_option {
@@ -6358,7 +6476,7 @@ where
                // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
                // closing a channel), so any changes are likely to be lost on restart!
 
-               let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
+               let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -6372,8 +6490,10 @@ where
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        let pending_forward_info = match decoded_hop_res {
                                                Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
-                                                       self.construct_pending_htlc_status(msg, shared_secret, next_hop,
-                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+                                                       self.construct_pending_htlc_status(
+                                                               msg, counterparty_node_id, shared_secret, next_hop,
+                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
+                                                       ),
                                                Err(e) => PendingHTLCStatus::Fail(e)
                                        };
                                        let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
@@ -6381,8 +6501,12 @@ where
                                                // but if we've sent a shutdown and they haven't acknowledged it yet, we just
                                                // want to reject the new HTLC and fail it backwards instead of forwarding.
                                                match pending_forward_info {
-                                                       PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                               let reason = if (error_code & 0x1000) != 0 {
+                                                       PendingHTLCStatus::Forward(PendingHTLCInfo {
+                                                               ref incoming_shared_secret, ref routing, ..
+                                                       }) => {
+                                                               let reason = if routing.blinded_failure().is_some() {
+                                                                       HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+                                                               } else if (error_code & 0x1000) != 0 {
                                                                        let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
                                                                        HTLCFailReason::reason(real_code, error_data)
                                                                } else {
@@ -6398,7 +6522,8 @@ where
                                                        _ => pending_forward_info
                                                }
                                        };
-                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan_phase_entry);
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
@@ -6425,7 +6550,8 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
                                                if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
-                                                       log_trace!(self.logger,
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       log_trace!(logger,
                                                                "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
                                                                msg.channel_id);
                                                        peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
@@ -6518,8 +6644,9 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        let funding_txo = chan.context.get_funding_txo();
-                                       let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
+                                       let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
                                        if let Some(monitor_update) = monitor_update_opt {
                                                handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
                                                        peer_state, per_peer_state, chan);
@@ -6576,7 +6703,8 @@ where
                                                                                        prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
                                                                        },
                                                                        hash_map::Entry::Occupied(_) => {
-                                                                               log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
+                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+                                                                               log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                        short_channel_id: prev_short_channel_id,
                                                                                        user_channel_id: Some(prev_user_channel_id),
@@ -6584,6 +6712,7 @@ where
                                                                                        htlc_id: prev_htlc_id,
                                                                                        incoming_packet_shared_secret: forward_info.incoming_shared_secret,
                                                                                        phantom_shared_secret: None,
+                                                                                       blinded_failure: forward_info.routing.blinded_failure(),
                                                                                });
 
                                                                                failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
@@ -6684,6 +6813,7 @@ where
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
                                                        self.raa_monitor_updates_held(
@@ -6691,7 +6821,7 @@ where
                                                                *counterparty_node_id)
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
-                                                       chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
+                                                       chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
                                                if let Some(monitor_update) = monitor_update_opt {
                                                        let funding_txo = funding_txo_opt
                                                                .expect("Funding outpoint must have been set for RAA handling to succeed");
@@ -6723,7 +6853,8 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                       try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &self.logger), chan_phase_entry);
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                       try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
@@ -6802,7 +6933,8 @@ where
                                        if were_node_one == msg_from_node_one {
                                                return Ok(NotifyOption::SkipPersistNoEvents);
                                        } else {
-                                               log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
                                                let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
                                                // If nothing changed after applying their update, we don't need to bother
                                                // persisting.
@@ -6833,6 +6965,7 @@ where
                                                msg.channel_id
                                        )
                                })?;
+                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -6843,7 +6976,7 @@ where
                                                // freed HTLCs to fail backwards. If in the future we no longer drop pending
                                                // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                                let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
-                                                       msg, &self.logger, &self.node_signer, self.chain_hash,
+                                                       msg, &&logger, &self.node_signer, self.chain_hash,
                                                        &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
                                                let mut channel_update = None;
                                                if let Some(msg) = responses.shutdown_msg {
@@ -6876,7 +7009,7 @@ where
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
-                                       log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
+                                       log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
                                                log_bytes!(msg.channel_id.0));
                                        // Unfortunately, lnd doesn't force close on errors
                                        // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
@@ -6934,11 +7067,12 @@ where
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
+                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
                                                if let Some(preimage) = htlc_update.payment_preimage {
-                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
+                                                       log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
                                                        self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
                                                } else {
-                                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
+                                                       log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
                                                        let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
                                                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
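
For orientation, the hunk above sits in `MonitorEvent::HTLCEvent` handling: an HTLC resolved by a `ChannelMonitor` either yields a preimage (claim upstream) or not (fail back with `0x4000 | 8`, permanent_channel_failure). A reduced sketch with the claim/fail calls elided to comments:

    match htlc_update.payment_preimage {
        Some(preimage) => {
            // On-chain resolution revealed the preimage: claim the inbound HTLC.
            // self.claim_funds_internal(htlc_update.source, preimage, ..);
        }
        None => {
            // The HTLC was lost on chain: fail it back permanently.
            let _reason = HTLCFailReason::from_failure_code(0x4000 | 8);
            // self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &_reason, receiver);
        }
    }
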
@@ -7026,7 +7160,7 @@ where
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
-                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger);
+                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context));
                                                if !holding_cell_failed_htlcs.is_empty() {
                                                        failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
                                                }
@@ -7129,7 +7263,8 @@ where
                                peer_state.channel_by_id.retain(|channel_id, phase| {
                                        match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
                                                                Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
                                                                        if let Some(msg) = msg_opt {
                                                                                has_update = true;
@@ -7152,7 +7287,7 @@ where
 
                                                                                self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
 
-                                                                               log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+                                                                               log_info!(logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
                                                                                false
@@ -7725,12 +7860,14 @@ where
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
        fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+               let logger = WithContext::from(
+                       &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+               );
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
                                let mut peer_state_lck = peer_state_mtx.lock().unwrap();
                                let peer_state = &mut *peer_state_lck;
-
                                if let Some(blocker) = completed_blocker.take() {
                                        // Only do this on the first iteration of the loop.
                                        if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
@@ -7745,7 +7882,7 @@ where
                                        // Check that, while holding the peer lock, we don't have anything else
                                        // blocking monitor updates for this channel. If we do, release the monitor
                                        // update(s) when those blockers complete.
-                                       log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
+                                       log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
                                                &channel_funding_outpoint.to_channel_id());
                                        break;
                                }
@@ -7754,7 +7891,7 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
-                                                       log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+                                                       log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                                channel_funding_outpoint.to_channel_id());
                                                        handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                                peer_state_lck, peer_state, per_peer_state, chan);
@@ -7764,13 +7901,13 @@ where
                                                                continue;
                                                        }
                                                } else {
-                                                       log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+                                                       log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
                                                                channel_funding_outpoint.to_channel_id());
                                                }
                                        }
                                }
                        } else {
-                               log_debug!(self.logger,
+                               log_debug!(logger,
                                        "Got a release post-RAA monitor update for peer {} but the channel is gone",
                                        log_pubkey!(counterparty_node_id));
                        }
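
The loop above drains blocked `ChannelMonitorUpdate`s one at a time; `unblock_next_blocked_monitor_update` hands back the next update plus a flag for whether more remain. A control-flow sketch, with `apply_monitor_update` standing in (hypothetically) for `handle_new_monitor_update!`:

    loop {
        match chan.unblock_next_blocked_monitor_update() {
            Some((monitor_update, further_update_exists)) => {
                apply_monitor_update(monitor_update); // hypothetical helper
                if !further_update_exists { break; }
                // More updates are queued: loop, re-taking the locks first.
            }
            None => break, // nothing was blocked
        }
    }
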
@@ -7802,346 +7939,6 @@ where
        }
 }
 
-fn create_fwd_pending_htlc_info(
-       msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32],
-       new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32],
-       next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
-) -> Result<PendingHTLCInfo, InboundOnionErr> {
-       debug_assert!(next_packet_pubkey_opt.is_some());
-       let outgoing_packet = msgs::OnionPacket {
-               version: 0,
-               public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
-               hop_data: new_packet_bytes,
-               hmac: hop_hmac,
-       };
-
-       let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data {
-               msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
-                       (short_channel_id, amt_to_forward, outgoing_cltv_value),
-               msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
-                       return Err(InboundOnionErr {
-                               msg: "Final Node OnionHopData provided for us as an intermediary node",
-                               err_code: 0x4000 | 22,
-                               err_data: Vec::new(),
-                       }),
-       };
-
-       Ok(PendingHTLCInfo {
-               routing: PendingHTLCRouting::Forward {
-                       onion_packet: outgoing_packet,
-                       short_channel_id,
-               },
-               payment_hash: msg.payment_hash,
-               incoming_shared_secret: shared_secret,
-               incoming_amt_msat: Some(msg.amount_msat),
-               outgoing_amt_msat: amt_to_forward,
-               outgoing_cltv_value,
-               skimmed_fee_msat: None,
-       })
-}
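
The removed helper above builds the outgoing `msgs::OnionPacket` from the peeled layer and only tolerates a `Forward` payload; a final-hop payload at an intermediate hop is a permanent error (`0x4000 | 22`). A compressed sketch of that dichotomy:

    match hop_data {
        msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } => {
            // Intermediate hop: relay amt_to_forward msat over short_channel_id
            // with the re-wrapped onion and outgoing_cltv_value.
        }
        msgs::InboundOnionPayload::Receive { .. }
        | msgs::InboundOnionPayload::BlindedReceive { .. } => {
            // Final-node payload where a forward was expected: fail 0x4000 | 22.
        }
    }
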
-
-fn create_recv_pending_htlc_info(
-       hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash,
-       amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
-       counterparty_skimmed_fee_msat: Option<u64>, current_height: u32, accept_mpp_keysend: bool,
-) -> Result<PendingHTLCInfo, InboundOnionErr> {
-       let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data {
-               msgs::InboundOnionPayload::Receive {
-                       payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
-               } =>
-                       (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
-               msgs::InboundOnionPayload::BlindedReceive {
-                       amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
-               } => {
-                       let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
-                       (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
-               }
-               msgs::InboundOnionPayload::Forward { .. } => {
-                       return Err(InboundOnionErr {
-                               err_code: 0x4000|22,
-                               err_data: Vec::new(),
-                               msg: "Got non final data with an HMAC of 0",
-                       })
-               },
-       };
-       // final_incorrect_cltv_expiry
-       if outgoing_cltv_value > cltv_expiry {
-               return Err(InboundOnionErr {
-                       msg: "Upstream node set CLTV to less than the CLTV set by the sender",
-                       err_code: 18,
-                       err_data: cltv_expiry.to_be_bytes().to_vec()
-               })
-       }
-       // final_expiry_too_soon
-       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
-       // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
-       //
-       // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
-       // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
-       // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
-       if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
-               let mut err_data = Vec::with_capacity(12);
-               err_data.extend_from_slice(&amt_msat.to_be_bytes());
-               err_data.extend_from_slice(&current_height.to_be_bytes());
-               return Err(InboundOnionErr {
-                       err_code: 0x4000 | 15, err_data,
-                       msg: "The final CLTV expiry is too soon to handle",
-               });
-       }
-       if (!allow_underpay && onion_amt_msat > amt_msat) ||
-               (allow_underpay && onion_amt_msat >
-                amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
-       {
-               return Err(InboundOnionErr {
-                       err_code: 19,
-                       err_data: amt_msat.to_be_bytes().to_vec(),
-                       msg: "Upstream node sent less than we were supposed to receive in payment",
-               });
-       }
-
-       let routing = if let Some(payment_preimage) = keysend_preimage {
-               // We need to check that the sender knows the keysend preimage before processing this
-               // payment further. Otherwise, an intermediary routing hop forwarding a non-keysend HTLC X
-               // could discover the final destination of X by probing the adjacent nodes on the route
-               // with a keysend payment whose payment hash is identical to X's and observing the
-               // processing-time discrepancies due to a hash collision with X.
-               let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
-               if hashed_preimage != payment_hash {
-                       return Err(InboundOnionErr {
-                               err_code: 0x4000|22,
-                               err_data: Vec::new(),
-                               msg: "Payment preimage didn't match payment hash",
-                       });
-               }
-               if !accept_mpp_keysend && payment_data.is_some() {
-                       return Err(InboundOnionErr {
-                               err_code: 0x4000|22,
-                               err_data: Vec::new(),
-                               msg: "We don't support MPP keysend payments",
-                       });
-               }
-               PendingHTLCRouting::ReceiveKeysend {
-                       payment_data,
-                       payment_preimage,
-                       payment_metadata,
-                       incoming_cltv_expiry: outgoing_cltv_value,
-                       custom_tlvs,
-               }
-       } else if let Some(data) = payment_data {
-               PendingHTLCRouting::Receive {
-                       payment_data: data,
-                       payment_metadata,
-                       incoming_cltv_expiry: outgoing_cltv_value,
-                       phantom_shared_secret,
-                       custom_tlvs,
-               }
-       } else {
-               return Err(InboundOnionErr {
-                       err_code: 0x4000|0x2000|3,
-                       err_data: Vec::new(),
-                       msg: "We require payment_secrets",
-               });
-       };
-       Ok(PendingHTLCInfo {
-               routing,
-               payment_hash,
-               incoming_shared_secret: shared_secret,
-               incoming_amt_msat: Some(amt_msat),
-               outgoing_amt_msat: onion_amt_msat,
-               outgoing_cltv_value,
-               skimmed_fee_msat: counterparty_skimmed_fee_msat,
-       })
-}
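
One subtlety in the helper above: with `allow_underpay` set, the counterparty may deliver less than the onion amount so long as the shortfall is covered by the declared skimmed fee. A worked example with hypothetical values:

    let (onion_amt_msat, amt_msat) = (100_000u64, 99_000u64);
    let counterparty_skimmed_fee_msat = Some(1_000u64);
    let allow_underpay = true;
    let underpaid = (!allow_underpay && onion_amt_msat > amt_msat) ||
        (allow_underpay && onion_amt_msat >
         amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)));
    // 100_000 <= 99_000 + 1_000, so this HTLC is accepted; without
    // allow_underpay it would fail with code 19 (the amount check above).
    assert!(!underpaid);
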
-
-/// Peel one layer off an incoming onion, returning [`PendingHTLCInfo`] (either Forward or Receive).
-/// This does all the relevant context-free checks that LDK requires for payment relay or
-/// acceptance. If the payment is to be received, and the amount matches the expected amount for
-/// a given invoice, this indicates that the [`msgs::UpdateAddHTLC`], once fully committed to the
-/// channel, will generate an [`Event::PaymentClaimable`].
-pub fn peel_payment_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
-       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
-       cur_height: u32, accept_mpp_keysend: bool,
-) -> Result<PendingHTLCInfo, InboundOnionErr>
-where
-       NS::Target: NodeSigner,
-       L::Target: Logger,
-{
-       let (hop, shared_secret, next_packet_details_opt) =
-               decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx
-       ).map_err(|e| {
-               let (err_code, err_data) = match e {
-                       HTLCFailureMsg::Malformed(m) => (m.failure_code, Vec::new()),
-                       HTLCFailureMsg::Relay(r) => (0x4000 | 22, r.reason.data),
-               };
-               let msg = "Failed to decode update add htlc onion";
-               InboundOnionErr { msg, err_code, err_data }
-       })?;
-       Ok(match hop {
-               onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
-                       let NextPacketDetails {
-                               next_packet_pubkey, outgoing_amt_msat: _, outgoing_scid: _, outgoing_cltv_value
-                       } = match next_packet_details_opt {
-                               Some(next_packet_details) => next_packet_details,
-                               // Forward should always include the next hop details
-                               None => return Err(InboundOnionErr {
-                                       msg: "Failed to decode update add htlc onion",
-                                       err_code: 0x4000 | 22,
-                                       err_data: Vec::new(),
-                               }),
-                       };
-
-                       if let Err((err_msg, code)) = check_incoming_htlc_cltv(
-                               cur_height, outgoing_cltv_value, msg.cltv_expiry
-                       ) {
-                               return Err(InboundOnionErr {
-                                       msg: err_msg,
-                                       err_code: code,
-                                       err_data: Vec::new(),
-                               });
-                       }
-                       create_fwd_pending_htlc_info(
-                               msg, next_hop_data, next_hop_hmac, new_packet_bytes, shared_secret,
-                               Some(next_packet_pubkey)
-                       )?
-               },
-               onion_utils::Hop::Receive(received_data) => {
-                       create_recv_pending_htlc_info(
-                               received_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry,
-                               None, false, msg.skimmed_fee_msat, cur_height, accept_mpp_keysend,
-                       )?
-               }
-       })
-}
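
A hedged call-site sketch for the public helper above; `my_node_signer`, `my_logger`, and `best_block_height` are placeholder names, not identifiers from this patch:

    let secp_ctx = Secp256k1::verification_only();
    match peel_payment_onion(&update_add_htlc_msg, &my_node_signer, &my_logger, &secp_ctx,
        best_block_height, /* accept_mpp_keysend */ false)
    {
        Ok(PendingHTLCInfo { routing, .. }) => {
            // `routing` is PendingHTLCRouting::Forward or a Receive variant.
        }
        Err(InboundOnionErr { err_code, msg, .. }) => {
            // Fail the HTLC back using err_code; msg is a human-readable reason.
        }
    }
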
-
-struct NextPacketDetails {
-       next_packet_pubkey: Result<PublicKey, secp256k1::Error>,
-       outgoing_scid: u64,
-       outgoing_amt_msat: u64,
-       outgoing_cltv_value: u32,
-}
-
-fn decode_incoming_update_add_htlc_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
-       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
-) -> Result<(onion_utils::Hop, [u8; 32], Option<NextPacketDetails>), HTLCFailureMsg>
-where
-       NS::Target: NodeSigner,
-       L::Target: Logger,
-{
-       macro_rules! return_malformed_err {
-               ($msg: expr, $err_code: expr) => {
-                       {
-                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                               return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
-                                       channel_id: msg.channel_id,
-                                       htlc_id: msg.htlc_id,
-                                       sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(),
-                                       failure_code: $err_code,
-                               }));
-                       }
-               }
-       }
-
-       if let Err(_) = msg.onion_routing_packet.public_key {
-               return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
-       }
-
-       let shared_secret = node_signer.ecdh(
-               Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None
-       ).unwrap().secret_bytes();
-
-       if msg.onion_routing_packet.version != 0 {
-               //TODO: The spec doesn't indicate whether we should only hash hop_data here (and in other
-               //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
-               //the hash doesn't really serve any purpose - in the case of hashing all data, the
-               //receiving node would have to brute force to figure out which version was put in the
-               //packet by the node that sent us the message; in the case of hashing the hop_data, the
-               //node knows the HMAC matched, so it already knows what is there...
-               return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
-       }
-       macro_rules! return_err {
-               ($msg: expr, $err_code: expr, $data: expr) => {
-                       {
-                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                               return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
-                                       channel_id: msg.channel_id,
-                                       htlc_id: msg.htlc_id,
-                                       reason: HTLCFailReason::reason($err_code, $data.to_vec())
-                                               .get_encrypted_failure_packet(&shared_secret, &None),
-                               }));
-                       }
-               }
-       }
-
-       let next_hop = match onion_utils::decode_next_payment_hop(
-               shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
-               msg.payment_hash, node_signer
-       ) {
-               Ok(res) => res,
-               Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
-                       return_malformed_err!(err_msg, err_code);
-               },
-               Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
-                       return_err!(err_msg, err_code, &[0; 0]);
-               },
-       };
-
-       let next_packet_details = match next_hop {
-               onion_utils::Hop::Forward {
-                       next_hop_data: msgs::InboundOnionPayload::Forward {
-                               short_channel_id, amt_to_forward, outgoing_cltv_value
-                       }, ..
-               } => {
-                       let next_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx,
-                               msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
-                       NextPacketDetails {
-                               next_packet_pubkey, outgoing_scid: short_channel_id,
-                               outgoing_amt_msat: amt_to_forward, outgoing_cltv_value
-                       }
-               },
-               onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
-               onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
-                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
-               {
-                       return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
-               }
-       };
-
-       Ok((next_hop, shared_secret, Some(next_packet_details)))
-}
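
The two error macros above encode a protocol distinction: before the onion HMAC verifies (bad ephemeral key, unknown version) we can only reply with `update_fail_malformed_htlc` carrying a hash of the onion, while post-decryption failures travel as an encrypted reason inside `update_fail_htlc`. A side-by-side sketch built from the fields used above:

    let _malformed = HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
        channel_id: msg.channel_id,
        htlc_id: msg.htlc_id,
        sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(),
        failure_code: 0x8000 | 0x4000 | 6, // BADONION | PERM | invalid_onion_key
    });
    let _relay = HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
        channel_id: msg.channel_id,
        htlc_id: msg.htlc_id,
        reason: HTLCFailReason::reason(0x4000 | 22, Vec::new())
            .get_encrypted_failure_packet(&shared_secret, &None),
    });
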
-
-fn check_incoming_htlc_cltv(
-       cur_height: u32, outgoing_cltv_value: u32, cltv_expiry: u32
-) -> Result<(), (&'static str, u16)> {
-       if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
-               return Err((
-                       "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
-                       0x1000 | 13, // incorrect_cltv_expiry
-               ));
-       }
-       // Theoretically, our channel counterparty shouldn't send us an HTLC expiring now,
-       // but we want to be robust with respect to counterparty packet sanitization (see
-       // HTLC_FAIL_BACK_BUFFER rationale).
-       if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-               return Err(("CLTV expiry is too close", 0x1000 | 14));
-       }
-       if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
-               return Err(("CLTV expiry is too far in the future", 21));
-       }
-       // If the HTLC expires ~now, don't bother trying to forward it to our
-       // counterparty. They should fail it anyway, but we don't want to bother with
-       // the round-trips or risk them deciding they definitely want the HTLC and
-       // force-closing to ensure they get it if we're offline.
-       // We previously had a much more aggressive check here which tried to ensure
-       // our counterparty received an HTLC which met *our* risk threshold, but there
-       // is no need to do that, and since we're a bit conservative with our risk
-       // threshold it just resulted in failing to forward payments.
-       if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
-               return Err(("Outgoing CLTV value is too soon", 0x1000 | 14));
-       }
-
-       Ok(())
-}
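
A worked instance of the first check above, taking `MIN_CLTV_EXPIRY_DELTA` as 72 blocks for illustration (check the crate constant before relying on the number):

    // An incoming cltv_expiry of 800_100 with a required delta of 72 supports
    // an outgoing_cltv_value of at most 800_100 - 72 = 800_028; anything
    // larger fails with incorrect_cltv_expiry (0x1000 | 13).
    let (cltv_expiry, min_cltv_expiry_delta) = (800_100u64, 72u64);
    let max_outgoing_cltv = cltv_expiry - min_cltv_expiry_delta;
    assert_eq!(max_outgoing_cltv, 800_028);
    assert!(cltv_expiry >= max_outgoing_cltv + min_cltv_expiry_delta); // passes
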
-
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
        M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
@@ -8263,7 +8060,7 @@ where
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
 
-               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
        }
 }
 
@@ -8289,13 +8086,13 @@ where
                let _persistence_guard =
                        PersistenceNotifierGuard::optionally_notify_skipping_background_events(
                                self, || -> NotifyOption { NotifyOption::DoPersist });
-               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
+               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
                        .map(|(a, b)| (a, Vec::new(), b)));
 
                let last_best_block_height = self.best_block.read().unwrap().height();
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
-                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
                }
        }
 
@@ -8312,7 +8109,7 @@ where
                                self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
-               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
 
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@ -8361,7 +8158,7 @@ where
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.context.get_funding_txo() {
                                if funding_txo.txid == *txid {
-                                       channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
+                                       channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context)).map(|()| (None, Vec::new(), None))
                                } else { Ok((None, Vec::new(), None)) }
                        } else { Ok((None, Vec::new(), None)) }
                });
@@ -8408,10 +8205,11 @@ where
                                                                        timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
                                                                                HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
                                                                }
+                                                               let logger = WithChannelContext::from(&self.logger, &channel.context);
                                                                if let Some(channel_ready) = channel_ready_opt {
                                                                        send_channel_ready!(self, pending_msg_events, channel, channel_ready);
                                                                        if channel.context.is_usable() {
-                                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
+                                                                               log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
                                                                                if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                                                node_id: channel.context.get_counterparty_node_id(),
@@ -8419,7 +8217,7 @@ where
                                                                                        });
                                                                                }
                                                                        } else {
-                                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
+                                                                               log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
                                                                        }
                                                                }
 
@@ -8429,7 +8227,7 @@ where
                                                                }
 
                                                                if let Some(announcement_sigs) = announcement_sigs {
-                                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
+                                                                       log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
                                                                        pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                                                node_id: channel.context.get_counterparty_node_id(),
                                                                                msg: announcement_sigs,
@@ -8520,6 +8318,7 @@ where
                                                incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
                                                phantom_shared_secret: None,
                                                outpoint: htlc.prev_funding_outpoint,
+                                               blinded_failure: htlc.forward_info.routing.blinded_failure(),
                                        });
 
                                        let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
@@ -8529,7 +8328,10 @@ where
                                        timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
                                                        HTLCFailReason::from_failure_code(0x2000 | 2),
                                                        HTLCDestination::InvalidForward { requested_forward_scid }));
-                                       log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
+                                       let logger = WithContext::from(
+                                               &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+                                       );
+                                       log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
                                        false
                                } else { true }
                        });
@@ -8836,8 +8638,11 @@ where
                let mut failed_channels = Vec::new();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
-                       log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
-                               log_pubkey!(counterparty_node_id));
+                       log_debug!(
+                               WithContext::from(&self.logger, Some(*counterparty_node_id), None),
+                               "Marking channels with {} disconnected and generating channel_updates.",
+                               log_pubkey!(counterparty_node_id)
+                       );
                        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
@@ -8845,7 +8650,8 @@ where
                                peer_state.channel_by_id.retain(|_, phase| {
                                        let context = match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() {
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
                                                                // We only retain funded channels that are not shutdown.
                                                                return true;
                                                        }
@@ -8932,8 +8738,9 @@ where
        }
 
        fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
                if !init_msg.features.supports_static_remote_key() {
-                       log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
+                       log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
                        return Err(());
                }
 
@@ -8985,7 +8792,7 @@ where
                                }
                        }
 
-                       log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+                       log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
@@ -9002,9 +8809,10 @@ where
                                                None
                                        }
                                ).for_each(|chan| {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
                                                node_id: chan.context.get_counterparty_node_id(),
-                                               msg: chan.get_channel_reestablish(&self.logger),
+                                               msg: chan.get_channel_reestablish(&&logger),
                                        });
                                });
                        }
@@ -9483,9 +9291,14 @@ impl_writeable_tlv_based!(PhantomRouteHints, {
        (6, real_node_pubkey, required),
 });
 
+impl_writeable_tlv_based!(BlindedForward, {
+       (0, inbound_blinding_point, required),
+});
+
 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
        (0, Forward) => {
                (0, onion_packet, required),
+               (1, blinded, option),
                (2, short_channel_id, required),
        },
        (1, Receive) => {
@@ -9587,10 +9400,15 @@ impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
        (1, Fail),
 );
 
+impl_writeable_tlv_based_enum!(BlindedFailure,
+       (0, FromIntroductionNode) => {}, ;
+);
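
The serialization additions here follow LDK's TLV compatibility rule (mirroring BOLT 1): even type numbers are required, odd type numbers are optional and skipped by readers that don't know them. That is why `blinded` above took type 1 and `blinded_failure` below takes type 3. A hypothetical struct showing the idiom (`ExampleHop` is invented for illustration):

    struct ExampleHop {
        scid: u64,
        inbound_blinding_point: Option<PublicKey>,
    }
    impl_writeable_tlv_based!(ExampleHop, {
        (0, scid, required),                 // even: readers must understand it
        (1, inbound_blinding_point, option), // odd: older readers skip it safely
    });
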
+
 impl_writeable_tlv_based!(HTLCPreviousHopData, {
        (0, short_channel_id, required),
        (1, phantom_shared_secret, option),
        (2, outpoint, required),
+       (3, blinded_failure, option),
        (4, htlc_id, required),
        (6, incoming_packet_shared_secret, required),
        (7, user_channel_id, option),
@@ -10207,6 +10025,7 @@ where
                        let mut channel: Channel<SP> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
+                       let logger = WithChannelContext::from(&args.logger, &channel.context);
                        let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -10215,22 +10034,22 @@ where
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
                                                channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                        // But if the channel is behind the monitor, close the channel:
-                                       log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
-                                       log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+                                       log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+                                       log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                        &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
                                        }
                                        if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
                                        }
                                        if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
                                        }
                                        if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
                                        let mut shutdown_result = channel.context.force_shutdown(true);
@@ -10263,14 +10082,14 @@ where
                                                        // claim update ChannelMonitor updates were persisted prior to persisting
                                                        // the ChannelMonitor update for the forward leg, so attempting to fail the
                                                        // backwards leg of the HTLC will simply be rejected.
-                                                       log_info!(args.logger,
+                                                       log_info!(logger,
                                                                "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
                                                                &payment_hash, &channel.context.channel_id());
                                                        failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                                }
                                        }
                                } else {
-                                       log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+                                       log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
                                                &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
                                                monitor.get_latest_update_id());
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
@@ -10304,18 +10123,19 @@ where
                                        channel_capacity_sats: Some(channel.context.get_value_satoshis()),
                                }, None));
                        } else {
-                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
-                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                               log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
-                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                               log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
+                               log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                               log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                               log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+                               log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
                                return Err(DecodeError::InvalidValue);
                        }
                }
 
-               for (funding_txo, _) in args.channel_monitors.iter() {
+               for (funding_txo, monitor) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
-                               log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
+                               let logger = WithChannelMonitor::from(&args.logger, monitor);
+                               log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
                                        &funding_txo.to_channel_id());
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
@@ -10492,12 +10312,12 @@ where
                let mut pending_background_events = Vec::new();
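+               // Replays ChannelMonitorUpdates that were still in flight at shutdown: any update
+               // newer than what the persisted monitor saw is queued as a background event to
+               // re-apply on startup.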
                macro_rules! handle_in_flight_updates {
                        ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
-                        $monitor: expr, $peer_state: expr, $channel_info_log: expr
+                        $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
                        ) => { {
                                let mut max_in_flight_update_id = 0;
                                $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
                                for update in $chan_in_flight_upds.iter() {
-                                       log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
+                                       log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
                                                update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
                                        pending_background_events.push(
@@ -10518,7 +10338,7 @@ where
                                                });
                                }
                                if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
-                                       log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
+                                       log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
                                        return Err(DecodeError::InvalidValue);
                                }
                                max_in_flight_update_id
@@ -10530,6 +10350,8 @@ where
                        let peer_state = &mut *peer_state_lock;
                        for phase in peer_state.channel_by_id.values() {
                                if let ChannelPhase::Funded(chan) = phase {
+                                       let logger = WithChannelContext::from(&args.logger, &chan.context);
+
                                        // Channels that were persisted have to be funded, otherwise they should have been
                                        // discarded.
                                        let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
@@ -10540,19 +10362,19 @@ where
                                                if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
                                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id,
                                                                handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
-                                                                       funding_txo, monitor, peer_state, ""));
+                                                                       funding_txo, monitor, peer_state, logger, ""));
                                                }
                                        }
                                        if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
                                                // If the channel is ahead of the monitor, return InvalidValue:
-                                               log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+                                               log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
                                                        chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
-                                               log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
-                                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                                               log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-                                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                               log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+                                               log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                               log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                               log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                               log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
                                                return Err(DecodeError::InvalidValue);
                                        }
                                } else {
@@ -10566,6 +10388,7 @@ where
 
                if let Some(in_flight_upds) = in_flight_monitor_updates {
                        for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
+                               let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
                                if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
                                        // Now that we've removed all the in-flight monitor updates for channels that are
                                        // still open, we need to replay any monitor updates that are for closed channels,
@@ -10575,15 +10398,15 @@ where
                                        });
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
-                                               funding_txo, monitor, peer_state, "closed ");
+                                               funding_txo, monitor, peer_state, logger, "closed ");
                                } else {
-                                       log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
-                                       log_error!(args.logger, " The ChannelMonitor for channel {} is missing.",
+                                       log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
+                                       log_error!(logger, " The ChannelMonitor for channel {} is missing.",
                                                &funding_txo.to_channel_id());
-                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                       log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                       log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                       log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
                                        return Err(DecodeError::InvalidValue);
                                }
                        }
@@ -10608,11 +10431,13 @@ where
                        // 0.0.102+
                        for (_, monitor) in args.channel_monitors.iter() {
                                let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+                               let chan_id = monitor.get_funding_txo().0.to_channel_id();
                                if counterparty_opt.is_none() {
+                                       let logger = WithChannelMonitor::from(&args.logger, monitor);
                                        for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
                                                        if path.hops.is_empty() {
-                                                               log_error!(args.logger, "Got an empty path for a pending payment");
+                                                               log_error!(logger, "Got an empty path for a pending payment");
                                                                return Err(DecodeError::InvalidValue);
                                                        }
 
@@ -10622,8 +10447,8 @@ where
                                                        match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
                                                                hash_map::Entry::Occupied(mut entry) => {
                                                                        let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
-                                                                       log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
-                                                                               if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), &htlc.payment_hash);
+                                                                       log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
+                                                                               if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
                                                                },
                                                                hash_map::Entry::Vacant(entry) => {
                                                                        let path_fee = path.fee_msat();
@@ -10643,7 +10468,7 @@ where
                                                                                starting_block_height: best_block_height,
                                                                                remaining_max_total_routing_fee_msat: None, // only used for retries, and we'll never retry on startup
                                                                        });
-                                                                       log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
+                                                                       log_info!(logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
                                                                                path_amt, &htlc.payment_hash, log_bytes!(session_priv_bytes));
                                                                }
                                                        }
@@ -10665,7 +10490,7 @@ where
                                                                        forwards.retain(|forward| {
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                                                                        if pending_forward_matches_htlc(&htlc_info) {
-                                                                                               log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+                                                                                               log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
                                                                                                        &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                                false
                                                                                        } else { true }
@@ -10675,7 +10500,7 @@ where
                                                                });
                                                                pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                                                                        if pending_forward_matches_htlc(&htlc_info) {
-                                                                               log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+                                                                               log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
                                                                                        &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                pending_events_read.retain(|(event, _)| {
                                                                                        if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
@@ -10703,7 +10528,7 @@ where
                                                                                        counterparty_node_id: path.hops[0].pubkey,
                                                                                };
                                                                        pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
-                                                                               path, false, compl_action, &pending_events, &args.logger);
+                                                                               path, false, compl_action, &pending_events, &&logger);
                                                                        pending_events_read = pending_events.into_inner().unwrap();
                                                                }
                                                        },
@@ -10834,6 +10659,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
                                if let ChannelPhase::Funded(chan) = phase {
+                                       let logger = WithChannelContext::from(&args.logger, &chan.context);
                                        if chan.context.outbound_scid_alias() == 0 {
                                                let mut outbound_scid_alias;
                                                loop {
@@ -10845,14 +10671,14 @@ where
                                        } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
                                        // Note that in rare cases it's possible to hit this while reading an older
                                                // channel if we just happened to pick a colliding outbound alias above.
-                                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+                                               log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                return Err(DecodeError::InvalidValue);
                                        }
                                        if chan.context.is_usable() {
                                                if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
                                                        // Note that in rare cases it's possible to hit this while reading an older
                                                        // channel if we just happened to pick a colliding outbound alias above.
-                                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+                                                       log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                        return Err(DecodeError::InvalidValue);
                                                }
                                        }
@@ -10903,11 +10729,13 @@ where
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
-                                                               channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+                                                               let logger = WithChannelContext::from(&args.logger, &channel.context);
+                                                               channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
                                                        }
                                                }
                                                if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
-                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
+                                                       let logger = WithChannelMonitor::from(&args.logger, previous_hop_monitor);
+                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &&logger);
                                                }
                                        }
                                        pending_events_read.push_back((events::Event::PaymentClaimed {
@@ -10924,14 +10752,15 @@ where
 
                for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
                        if let Some(peer_state) = per_peer_state.get(&node_id) {
-                               for (_, actions) in monitor_update_blocked_actions.iter() {
+                               for (channel_id, actions) in monitor_update_blocked_actions.iter() {
+                                       let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id));
                                        for action in actions.iter() {
                                                if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                        downstream_counterparty_and_funding_outpoint:
                                                                Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
                                                } = action {
                                                        if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
-                                                               log_trace!(args.logger,
+                                                               log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
                                                                        blocked_channel_outpoint.to_channel_id());
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
@@ -10952,7 +10781,7 @@ where
                                }
                                peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
                        } else {
-                               log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
+                               log_error!(WithContext::from(&args.logger, Some(node_id), None), "Got blocked actions without a per-peer-state for {}", node_id);
                                return Err(DecodeError::InvalidValue);
                        }
                }
@@ -11040,11 +10869,10 @@ mod tests {
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
        use crate::ln::ChannelId;
        use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
-       use crate::ln::features::{ChannelFeatures, NodeFeatures};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::{self, ErrorAction};
        use crate::ln::msgs::ChannelMessageHandler;
-       use crate::routing::router::{Path, PaymentParameters, RouteHop, RouteParameters, find_route};
+       use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
        use crate::util::errors::APIError;
        use crate::util::test_utils;
        use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
@@ -12320,137 +12148,6 @@ mod tests {
                        check_spends!(txn[0], funding_tx);
                }
        }
-
-       #[test]
-       fn test_peel_payment_onion() {
-               use super::*;
-               let secp_ctx = Secp256k1::new();
-
-               let bob = crate::sign::KeysManager::new(&[2; 32], 42, 42);
-               let bob_pk = PublicKey::from_secret_key(&secp_ctx, &bob.get_node_secret_key());
-               let charlie = crate::sign::KeysManager::new(&[3; 32], 42, 42);
-               let charlie_pk = PublicKey::from_secret_key(&secp_ctx, &charlie.get_node_secret_key());
-
-               let (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
-                       prng_seed, hops, recipient_amount, pay_secret) = payment_onion_args(bob_pk, charlie_pk);
-
-               let path = Path {
-                       hops: hops,
-                       blinded_tail: None,
-               };
-
-               let (amount_msat, cltv_expiry, onion) = create_payment_onion(
-                       &secp_ctx, &path, &session_priv, total_amt_msat, recipient_onion, cur_height,
-                       payment_hash, Some(preimage), prng_seed
-               ).unwrap();
-
-               let msg = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, onion);
-               let logger = test_utils::TestLogger::with_id("bob".to_string());
-
-               let peeled = peel_payment_onion(&msg, &&bob, &&logger, &secp_ctx, cur_height, true)
-                       .map_err(|e| e.msg).unwrap();
-
-               let next_onion = match peeled.routing {
-                       PendingHTLCRouting::Forward { onion_packet, short_channel_id: _ } => {
-                               onion_packet
-                       },
-                       _ => panic!("expected a forwarded onion"),
-               };
-
-               let msg2 = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, next_onion);
-               let peeled2 = peel_payment_onion(&msg2, &&charlie, &&logger, &secp_ctx, cur_height, true)
-                       .map_err(|e| e.msg).unwrap();
-
-               match peeled2.routing {
-                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_data, incoming_cltv_expiry, .. } => {
-                               assert_eq!(payment_preimage, preimage);
-                               assert_eq!(peeled2.outgoing_amt_msat, recipient_amount);
-                               assert_eq!(incoming_cltv_expiry, peeled2.outgoing_cltv_value);
-                               let msgs::FinalOnionHopData{total_msat, payment_secret} = payment_data.unwrap();
-                               assert_eq!(total_msat, total_amt_msat);
-                               assert_eq!(payment_secret, pay_secret);
-                       },
-                       _ => panic!("expected a received keysend"),
-               };
-       }
-
-       fn make_update_add_msg(
-               amount_msat: u64, cltv_expiry: u32, payment_hash: PaymentHash,
-               onion_routing_packet: msgs::OnionPacket
-       ) -> msgs::UpdateAddHTLC {
-               msgs::UpdateAddHTLC {
-                       channel_id: ChannelId::from_bytes([0; 32]),
-                       htlc_id: 0,
-                       amount_msat,
-                       cltv_expiry,
-                       payment_hash,
-                       onion_routing_packet,
-                       skimmed_fee_msat: None,
-               }
-       }
-
-       fn payment_onion_args(hop_pk: PublicKey, recipient_pk: PublicKey) -> (
-               SecretKey, u64, u32, RecipientOnionFields, PaymentPreimage, PaymentHash, [u8; 32],
-               Vec<RouteHop>, u64, PaymentSecret,
-       ) {
-               let session_priv_bytes = [42; 32];
-               let session_priv = SecretKey::from_slice(&session_priv_bytes).unwrap();
-               let total_amt_msat = 1000;
-               let cur_height = 1000;
-               let pay_secret = PaymentSecret([99; 32]);
-               let recipient_onion = RecipientOnionFields::secret_only(pay_secret);
-               let preimage_bytes = [43; 32];
-               let preimage = PaymentPreimage(preimage_bytes);
-               let rhash_bytes = Sha256::hash(&preimage_bytes).to_byte_array();
-               let payment_hash = PaymentHash(rhash_bytes);
-               let prng_seed = [44; 32];
-
-               // make a route alice -> bob -> charlie
-               let hop_fee = 1;
-               let recipient_amount = total_amt_msat - hop_fee;
-               let hops = vec![
-                       RouteHop {
-                               pubkey: hop_pk,
-                               fee_msat: hop_fee,
-                               cltv_expiry_delta: 42,
-                               short_channel_id: 1,
-                               node_features: NodeFeatures::empty(),
-                               channel_features: ChannelFeatures::empty(),
-                               maybe_announced_channel: false,
-                       },
-                       RouteHop {
-                               pubkey: recipient_pk,
-                               fee_msat: recipient_amount,
-                               cltv_expiry_delta: 42,
-                               short_channel_id: 2,
-                               node_features: NodeFeatures::empty(),
-                               channel_features: ChannelFeatures::empty(),
-                               maybe_announced_channel: false,
-                       }
-               ];
-
-               (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
-                       prng_seed, hops, recipient_amount, pay_secret)
-       }
-
-       pub fn create_payment_onion<T: bitcoin::secp256k1::Signing>(
-               secp_ctx: &Secp256k1<T>, path: &Path, session_priv: &SecretKey, total_msat: u64,
-               recipient_onion: RecipientOnionFields, best_block_height: u32, payment_hash: PaymentHash,
-               keysend_preimage: Option<PaymentPreimage>, prng_seed: [u8; 32]
-       ) -> Result<(u64, u32, msgs::OnionPacket), ()> {
-               let onion_keys = super::onion_utils::construct_onion_keys(&secp_ctx, &path, &session_priv).map_err(|_| ())?;
-               let (onion_payloads, htlc_msat, htlc_cltv) = super::onion_utils::build_onion_payloads(
-                       &path,
-                       total_msat,
-                       recipient_onion,
-                       best_block_height + 1,
-                       &keysend_preimage,
-               ).map_err(|_| ())?;
-               let onion_packet = super::onion_utils::construct_onion_packet(
-                       onion_payloads, onion_keys, prng_seed, &payment_hash
-               )?;
-               Ok((htlc_msat, htlc_cltv, onion_packet))
-       }
 }
 
 #[cfg(ldk_bench)]
index 3475e89264d2eb6e0fc1b41fb7f0f97b3c815190..a41cf306880d73220742168f64a4258327b5043b 100644 (file)
@@ -693,7 +693,7 @@ fn test_update_fee_that_funder_cannot_afford() {
                *feerate_lock += 4;
        }
        nodes[0].node.timer_tick_occurred();
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
        check_added_monitors!(nodes[0], 0);
 
        const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
@@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        // Check that the funder, who sent the update_fee request, can afford the new fee
        // (funder_balance >= fee + channel_reserve). This should produce an error.
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
@@ -1415,6 +1415,7 @@ fn test_fee_spike_violation_fails_htlc() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -1528,7 +1529,7 @@ fn test_fee_spike_violation_fails_htlc() {
                },
                _ => panic!("Unexpected event"),
        };
-       nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
+       nodes[1].logger.assert_log("lightning::ln::channel",
                format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
 
        check_added_monitors!(nodes[1], 2);
@@ -1611,11 +1612,12 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+       nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
@@ -1789,11 +1791,12 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
@@ -3510,6 +3513,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                        cltv_expiry,
                        onion_routing_packet,
                        skimmed_fee_msat: None,
+                       blinding_point: None,
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
@@ -5926,7 +5930,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
        // us to surface its failure to the user.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -6014,7 +6018,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        // to surface its failure to the user. The first payment should succeed.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the second payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -6288,7 +6292,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        updates.update_add_htlcs[0].amount_msat = 0;
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
@@ -6481,6 +6485,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet.clone(),
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        for i in 0..50 {
@@ -9838,10 +9843,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                        // Outbound dust balance: 6399 sats
                        let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
                        let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
                } else {
                        // Outbound dust balance: 5200 sats
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
+                       nodes[0].logger.assert_log("lightning::ln::channel",
                                format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
                                        dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
                                        max_dust_htlc_exposure_msat), 1);
index fb809041af5a491e91efc612880a0ad128676ab9..827d77419892ac7c9c0ba8a1a90bd09d1ee6ccd5 100644 (file)
@@ -13,6 +13,7 @@
 #[macro_use]
 pub mod functional_test_utils;
 
+pub mod onion_payment;
 pub mod channelmanager;
 pub mod channel_keys;
 pub mod inbound_payment;
index d5529e98ba013c4f8d2118edb5de4bebb82470df..2d871b354a26d5643bece3211ddaa9986978bebd 100644 (file)
@@ -31,7 +31,7 @@ use bitcoin::{secp256k1, Witness};
 use bitcoin::blockdata::script::ScriptBuf;
 use bitcoin::hash_types::Txid;
 
-use crate::blinded_path::payment::ReceiveTlvs;
+use crate::blinded_path::payment::{BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 use crate::ln::onion_utils;
@@ -680,7 +680,11 @@ pub struct UpdateAddHTLC {
        ///
        /// [`ChannelConfig::accept_underpaying_htlcs`]: crate::util::config::ChannelConfig::accept_underpaying_htlcs
        pub skimmed_fee_msat: Option<u64>,
-       pub(crate) onion_routing_packet: OnionPacket,
+       /// The onion routing packet with encrypted data for the next hop.
+       pub onion_routing_packet: OnionPacket,
+       /// Provided if we are relaying or receiving a payment within a blinded path, to decrypt the onion
+       /// routing packet and the recipient-provided encrypted payload within.
+       pub blinding_point: Option<PublicKey>,
 }
 
  /// An onion message to be sent to or received from a peer.
@@ -1662,9 +1666,10 @@ pub trait OnionMessageHandler {
 
 mod fuzzy_internal_msgs {
        use bitcoin::secp256k1::PublicKey;
-       use crate::blinded_path::payment::PaymentConstraints;
+       use crate::blinded_path::payment::{PaymentConstraints, PaymentRelay};
        use crate::prelude::*;
        use crate::ln::{PaymentPreimage, PaymentSecret};
+       use crate::ln::features::BlindedHopFeatures;
 
        // These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
        // them from untrusted input):
@@ -1691,6 +1696,13 @@ mod fuzzy_internal_msgs {
                        amt_msat: u64,
                        outgoing_cltv_value: u32,
                },
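+               // Relay instructions for a hop inside a blinded path; every value below is read
+               // from the recipient-encrypted TLVs rather than the cleartext onion payload.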
+               BlindedForward {
+                       short_channel_id: u64,
+                       payment_relay: PaymentRelay,
+                       payment_constraints: PaymentConstraints,
+                       features: BlindedHopFeatures,
+                       intro_node_blinding_point: PublicKey,
+               },
                BlindedReceive {
                        amt_msat: u64,
                        total_msat: u64,
@@ -2211,6 +2223,7 @@ impl_writeable_msg!(UpdateAddHTLC, {
        cltv_expiry,
        onion_routing_packet,
 }, {
+       (0, blinding_point, option),
        (65537, skimmed_fee_msat, option)
 });
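+// Note: `blinding_point` takes the even TLV type 0, so a reader that sees it must understand
+// it, while `skimmed_fee_msat` keeps its odd, LDK-specific type.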
 
@@ -2349,7 +2362,23 @@ impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: Node
                        let mut s = Cursor::new(&enc_tlvs);
                        let mut reader = FixedLengthReader::new(&mut s, enc_tlvs.len() as u64);
                        match ChaChaPolyReadAdapter::read(&mut reader, rho)? {
-                               ChaChaPolyReadAdapter { readable: ReceiveTlvs { payment_secret, payment_constraints }} => {
+                               ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Forward(ForwardTlvs {
+                                       short_channel_id, payment_relay, payment_constraints, features
+                               })} => {
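+                                       // An intermediate blinded hop takes its amount and CLTV from the
+                                       // encrypted TLVs, so the cleartext fields must be absent here.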
+                                       if amt.is_some() || cltv_value.is_some() || total_msat.is_some() {
+                                               return Err(DecodeError::InvalidValue)
+                                       }
+                                       Ok(Self::BlindedForward {
+                                               short_channel_id,
+                                               payment_relay,
+                                               payment_constraints,
+                                               features,
+                                               intro_node_blinding_point: blinding_point,
+                                       })
+                               },
+                               ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(ReceiveTlvs {
+                                       payment_secret, payment_constraints
+                               })} => {
                                        if total_msat.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
                                        Ok(Self::BlindedReceive {
                                                amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
@@ -3756,6 +3785,7 @@ mod tests {
                        cltv_expiry: 821716,
                        onion_routing_packet,
                        skimmed_fee_msat: None,
+                       blinding_point: None,
                };
                let encoded_value = update_add_htlc.encode();
                let target_value = <Vec<u8>>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap();
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
new file mode 100644 (file)
index 0000000..ca15a37
--- /dev/null
@@ -0,0 +1,567 @@
+//! Utilities for decoding payment onions on behalf of channelmanager.rs.
+//!
+//! Includes a public [`peel_payment_onion`] function for use by external projects or libraries.
+
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{self, Secp256k1, PublicKey};
+
+use crate::blinded_path;
+use crate::blinded_path::payment::{PaymentConstraints, PaymentRelay};
+use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::ln::PaymentHash;
+use crate::ln::channelmanager::{BlindedForward, CLTV_FAR_FAR_AWAY, HTLCFailureMsg, MIN_CLTV_EXPIRY_DELTA, PendingHTLCInfo, PendingHTLCRouting};
+use crate::ln::features::BlindedHopFeatures;
+use crate::ln::msgs;
+use crate::ln::onion_utils;
+use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
+use crate::sign::{NodeSigner, Recipient};
+use crate::util::logger::Logger;
+
+use crate::prelude::*;
+use core::ops::Deref;
+
+/// An error encountered while decoding or validating an inbound onion payment.
+pub struct InboundOnionErr {
+       /// BOLT 4 error code.
+       pub err_code: u16,
+       /// Data attached to this error.
+       pub err_data: Vec<u8>,
+       /// Error message text.
+       pub msg: &'static str,
+}
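+
+// The error codes used throughout this module are BOLT 4 failure codes: e.g.
+// `0x4000 | 22` is PERM|22 (`invalid_onion_payload`), `18` and `19` are
+// `final_incorrect_cltv_expiry` and `final_incorrect_htlc_amount`, and
+// `0x4000 | 15` is PERM|15 (`incorrect_or_unknown_payment_details`).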
+
+fn check_blinded_forward(
+       inbound_amt_msat: u64, inbound_cltv_expiry: u32, payment_relay: &PaymentRelay,
+       payment_constraints: &PaymentConstraints, features: &BlindedHopFeatures
+) -> Result<(u64, u32), ()> {
+       let amt_to_forward = blinded_path::payment::amt_to_forward_msat(
+               inbound_amt_msat, payment_relay
+       ).ok_or(())?;
+       let outgoing_cltv_value = inbound_cltv_expiry.checked_sub(
+               payment_relay.cltv_expiry_delta as u32
+       ).ok_or(())?;
+       if inbound_amt_msat < payment_constraints.htlc_minimum_msat ||
+               outgoing_cltv_value > payment_constraints.max_cltv_expiry
+               { return Err(()) }
+       if features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()) }
+       Ok((amt_to_forward, outgoing_cltv_value))
+}
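+
+// Illustrative example (values chosen for this note, not taken from the spec): with a
+// `payment_relay` of `fee_base_msat: 1_000`, `fee_proportional_millionths: 0` and
+// `cltv_expiry_delta: 36`, an inbound HTLC of 101_000 msat yields an `amt_to_forward` of
+// 100_000 msat and an `outgoing_cltv_value` 36 blocks below the inbound expiry.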
+
+pub(super) fn create_fwd_pending_htlc_info(
+       msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32],
+       new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32],
+       next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+) -> Result<PendingHTLCInfo, InboundOnionErr> {
+       debug_assert!(next_packet_pubkey_opt.is_some());
+       let outgoing_packet = msgs::OnionPacket {
+               version: 0,
+               public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
+               hop_data: new_packet_bytes,
+               hmac: hop_hmac,
+       };
+
+       let (
+               short_channel_id, amt_to_forward, outgoing_cltv_value, inbound_blinding_point
+       ) = match hop_data {
+               msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
+                       (short_channel_id, amt_to_forward, outgoing_cltv_value, None),
+               msgs::InboundOnionPayload::BlindedForward {
+                       short_channel_id, payment_relay, payment_constraints, intro_node_blinding_point, features,
+               } => {
+                       let (amt_to_forward, outgoing_cltv_value) = check_blinded_forward(
+                               msg.amount_msat, msg.cltv_expiry, &payment_relay, &payment_constraints, &features
+                       ).map_err(|()| {
+                               // We should be returning malformed here if `msg.blinding_point` is set, but this is
+                               // unreachable right now since we checked it in `decode_incoming_update_add_htlc_onion`.
+                               InboundOnionErr {
+                                       msg: "Underflow calculating outbound amount or cltv value for blinded forward",
+                                       err_code: INVALID_ONION_BLINDING,
+                                       err_data: vec![0; 32],
+                               }
+                       })?;
+                       (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(intro_node_blinding_point))
+               },
+               msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
+                       return Err(InboundOnionErr {
+                               msg: "Final Node OnionHopData provided for us as an intermediary node",
+                               err_code: 0x4000 | 22,
+                               err_data: Vec::new(),
+                       }),
+       };
+
+       Ok(PendingHTLCInfo {
+               routing: PendingHTLCRouting::Forward {
+                       onion_packet: outgoing_packet,
+                       short_channel_id,
+                       blinded: inbound_blinding_point.map(|bp| BlindedForward { inbound_blinding_point: bp }),
+               },
+               payment_hash: msg.payment_hash,
+               incoming_shared_secret: shared_secret,
+               incoming_amt_msat: Some(msg.amount_msat),
+               outgoing_amt_msat: amt_to_forward,
+               outgoing_cltv_value,
+               skimmed_fee_msat: None,
+       })
+}
+
+pub(super) fn create_recv_pending_htlc_info(
+       hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash,
+       amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
+       counterparty_skimmed_fee_msat: Option<u64>, current_height: u32, accept_mpp_keysend: bool,
+) -> Result<PendingHTLCInfo, InboundOnionErr> {
+       let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data {
+               msgs::InboundOnionPayload::Receive {
+                       payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
+               } =>
+                       (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
+               msgs::InboundOnionPayload::BlindedReceive {
+                       amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
+               } => {
+                       let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
+                       (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
+               }
+               msgs::InboundOnionPayload::Forward { .. } => {
+                       return Err(InboundOnionErr {
+                               err_code: 0x4000|22,
+                               err_data: Vec::new(),
+                               msg: "Got non final data with an HMAC of 0",
+                       })
+               },
+               msgs::InboundOnionPayload::BlindedForward { .. } => {
+                       return Err(InboundOnionErr {
+                               err_code: INVALID_ONION_BLINDING,
+                               err_data: vec![0; 32],
+                               msg: "Got blinded non final data with an HMAC of 0",
+                       })
+               }
+       };
+       // final_incorrect_cltv_expiry
+       if outgoing_cltv_value > cltv_expiry {
+               return Err(InboundOnionErr {
+                       msg: "Upstream node set CLTV to less than the CLTV set by the sender",
+                       err_code: 18,
+                       err_data: cltv_expiry.to_be_bytes().to_vec()
+               })
+       }
+       // final_expiry_too_soon
+       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
+       // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+       //
+       // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
+       // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
+       // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
+       if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
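+               // Per BOLT 4, `incorrect_or_unknown_payment_details` err_data is the 8-byte
+               // big-endian HTLC amount followed by the 4-byte best-known block height.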
+               let mut err_data = Vec::with_capacity(12);
+               err_data.extend_from_slice(&amt_msat.to_be_bytes());
+               err_data.extend_from_slice(&current_height.to_be_bytes());
+               return Err(InboundOnionErr {
+                       err_code: 0x4000 | 15, err_data,
+                       msg: "The final CLTV expiry is too soon to handle",
+               });
+       }
+       if (!allow_underpay && onion_amt_msat > amt_msat) ||
+               (allow_underpay && onion_amt_msat >
+                amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
+       {
+               return Err(InboundOnionErr {
+                       err_code: 19,
+                       err_data: amt_msat.to_be_bytes().to_vec(),
+                       msg: "Upstream node sent less than we were supposed to receive in payment",
+               });
+       }
+
+       let routing = if let Some(payment_preimage) = keysend_preimage {
+               // We need to check that the sender knows the keysend preimage before processing this
+               // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
+               // could discover the final destination of X, by probing the adjacent nodes on the route
+               // with a keysend payment of identical payment hash to X and observing the processing
+               // time discrepancies due to a hash collision with X.
+               let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
+               if hashed_preimage != payment_hash {
+                       return Err(InboundOnionErr {
+                               err_code: 0x4000|22,
+                               err_data: Vec::new(),
+                               msg: "Payment preimage didn't match payment hash",
+                       });
+               }
+               if !accept_mpp_keysend && payment_data.is_some() {
+                       return Err(InboundOnionErr {
+                               err_code: 0x4000|22,
+                               err_data: Vec::new(),
+                               msg: "We don't support MPP keysend payments",
+                       });
+               }
+               PendingHTLCRouting::ReceiveKeysend {
+                       payment_data,
+                       payment_preimage,
+                       payment_metadata,
+                       incoming_cltv_expiry: outgoing_cltv_value,
+                       custom_tlvs,
+               }
+       } else if let Some(data) = payment_data {
+               PendingHTLCRouting::Receive {
+                       payment_data: data,
+                       payment_metadata,
+                       incoming_cltv_expiry: outgoing_cltv_value,
+                       phantom_shared_secret,
+                       custom_tlvs,
+               }
+       } else {
+               return Err(InboundOnionErr {
+                       err_code: 0x4000|0x2000|3,
+                       err_data: Vec::new(),
+                       msg: "We require payment_secrets",
+               });
+       };
+       Ok(PendingHTLCInfo {
+               routing,
+               payment_hash,
+               incoming_shared_secret: shared_secret,
+               incoming_amt_msat: Some(amt_msat),
+               outgoing_amt_msat: onion_amt_msat,
+               outgoing_cltv_value,
+               skimmed_fee_msat: counterparty_skimmed_fee_msat,
+       })
+}
+
+/// Peel one layer off an incoming onion, returning [`PendingHTLCInfo`] (either Forward or Receive).
+/// This does all the relevant context-free checks that LDK requires for payment relay or
+/// acceptance. If the payment is to be received, and the amount matches the expected amount for
+/// a given invoice, this indicates that the [`msgs::UpdateAddHTLC`], once fully committed in the
+/// channel, will generate an [`Event::PaymentClaimable`].
+///
+/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
+pub fn peel_payment_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
+       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
+       cur_height: u32, accept_mpp_keysend: bool,
+) -> Result<PendingHTLCInfo, InboundOnionErr>
+where
+       NS::Target: NodeSigner,
+       L::Target: Logger,
+{
+       let (hop, shared_secret, next_packet_details_opt) =
+               decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx
+       ).map_err(|e| {
+               let (err_code, err_data) = match e {
+                       HTLCFailureMsg::Malformed(m) => (m.failure_code, Vec::new()),
+                       HTLCFailureMsg::Relay(r) => (0x4000 | 22, r.reason.data),
+               };
+               let msg = "Failed to decode update add htlc onion";
+               InboundOnionErr { msg, err_code, err_data }
+       })?;
+       Ok(match hop {
+               onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
+                       let NextPacketDetails {
+                               next_packet_pubkey, outgoing_amt_msat: _, outgoing_scid: _, outgoing_cltv_value
+                       } = match next_packet_details_opt {
+                               Some(next_packet_details) => next_packet_details,
+                               // Forward should always include the next hop details
+                               None => return Err(InboundOnionErr {
+                                       msg: "Failed to decode update add htlc onion",
+                                       err_code: 0x4000 | 22,
+                                       err_data: Vec::new(),
+                               }),
+                       };
+
+                       if let Err((err_msg, code)) = check_incoming_htlc_cltv(
+                               cur_height, outgoing_cltv_value, msg.cltv_expiry
+                       ) {
+                               return Err(InboundOnionErr {
+                                       msg: err_msg,
+                                       err_code: code,
+                                       err_data: Vec::new(),
+                               });
+                       }
+                       create_fwd_pending_htlc_info(
+                               msg, next_hop_data, next_hop_hmac, new_packet_bytes, shared_secret,
+                               Some(next_packet_pubkey)
+                       )?
+               },
+               onion_utils::Hop::Receive(received_data) => {
+                       create_recv_pending_htlc_info(
+                               received_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry,
+                               None, false, msg.skimmed_fee_msat, cur_height, accept_mpp_keysend,
+                       )?
+               }
+       })
+}
+
+pub(super) struct NextPacketDetails {
+       pub(super) next_packet_pubkey: Result<PublicKey, secp256k1::Error>,
+       pub(super) outgoing_scid: u64,
+       pub(super) outgoing_amt_msat: u64,
+       pub(super) outgoing_cltv_value: u32,
+}
+
+pub(super) fn decode_incoming_update_add_htlc_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
+       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
+) -> Result<(onion_utils::Hop, [u8; 32], Option<NextPacketDetails>), HTLCFailureMsg>
+where
+       NS::Target: NodeSigner,
+       L::Target: Logger,
+{
+       macro_rules! return_malformed_err {
+               ($msg: expr, $err_code: expr) => {
+                       {
+                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                               return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                       channel_id: msg.channel_id,
+                                       htlc_id: msg.htlc_id,
+                                       sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(),
+                                       failure_code: $err_code,
+                               }));
+                       }
+               }
+       }
+
+       if let Err(_) = msg.onion_routing_packet.public_key {
+               return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
+       }
+
+       let shared_secret = node_signer.ecdh(
+               Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None
+       ).unwrap().secret_bytes();
+
+       if msg.onion_routing_packet.version != 0 {
+               //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
+               //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
+               //the hash doesn't really serve any purpose - in the case of hashing all data, the
+               //receiving node would have to brute force to figure out which version was put in the
+               //packet by the node that sent us the message; in the case of hashing the hop_data, the
+               //node knows the HMAC matched, so they already know what is there...
+               return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
+       }
+       macro_rules! return_err {
+               ($msg: expr, $err_code: expr, $data: expr) => {
+                       {
+                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                               return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                       channel_id: msg.channel_id,
+                                       htlc_id: msg.htlc_id,
+                                       reason: HTLCFailReason::reason($err_code, $data.to_vec())
+                                               .get_encrypted_failure_packet(&shared_secret, &None),
+                               }));
+                       }
+               }
+       }
+
+       let next_hop = match onion_utils::decode_next_payment_hop(
+               shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
+               msg.payment_hash, node_signer
+       ) {
+               Ok(res) => res,
+               Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
+                       return_malformed_err!(err_msg, err_code);
+               },
+               Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
+                       return_err!(err_msg, err_code, &[0; 0]);
+               },
+       };
+
+       let next_packet_details = match next_hop {
+               onion_utils::Hop::Forward {
+                       next_hop_data: msgs::InboundOnionPayload::Forward {
+                               short_channel_id, amt_to_forward, outgoing_cltv_value
+                       }, ..
+               } => {
+                       let next_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx,
+                               msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
+                       NextPacketDetails {
+                               next_packet_pubkey, outgoing_scid: short_channel_id,
+                               outgoing_amt_msat: amt_to_forward, outgoing_cltv_value
+                       }
+               },
+               onion_utils::Hop::Forward {
+                       next_hop_data: msgs::InboundOnionPayload::BlindedForward {
+                               short_channel_id, ref payment_relay, ref payment_constraints, ref features, ..
+                       }, ..
+               } => {
+                       let (amt_to_forward, outgoing_cltv_value) = match check_blinded_forward(
+                               msg.amount_msat, msg.cltv_expiry, &payment_relay, &payment_constraints, &features
+                       ) {
+                               Ok((amt, cltv)) => (amt, cltv),
+                               Err(()) => {
+                                       return_err!("Underflow calculating outbound amount or cltv value for blinded forward",
+                                               INVALID_ONION_BLINDING, &[0; 32]);
+                               }
+                       };
+                       let next_packet_pubkey = onion_utils::next_hop_pubkey(&secp_ctx,
+                               msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
+                       NextPacketDetails {
+                               next_packet_pubkey, outgoing_scid: short_channel_id, outgoing_amt_msat: amt_to_forward,
+                               outgoing_cltv_value
+                       }
+               },
+               onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
+               onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
+                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
+               {
+                       return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+               }
+       };
+
+       Ok((next_hop, shared_secret, Some(next_packet_details)))
+}
+
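+/// Checks that an HTLC we have been asked to forward leaves us sufficient CLTV headroom: the
+/// inbound expiry must exceed the outbound expiry by at least [`MIN_CLTV_EXPIRY_DELTA`], and
+/// neither value may be too close to (or absurdly far beyond) the current chain height.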
+pub(super) fn check_incoming_htlc_cltv(
+       cur_height: u32, outgoing_cltv_value: u32, cltv_expiry: u32
+) -> Result<(), (&'static str, u16)> {
+       if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+               return Err((
+                       "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+                       0x1000 | 13, // incorrect_cltv_expiry
+               ));
+       }
+       // Theoretically, the channel counterparty shouldn't send us an HTLC expiring now,
+       // but we want to be robust wrt counterparty packet sanitization (see
+       // HTLC_FAIL_BACK_BUFFER rationale).
+       if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
+               return Err(("CLTV expiry is too close", 0x1000 | 14));
+       }
+       if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+               return Err(("CLTV expiry is too far in the future", 21));
+       }
+       // If the HTLC expires ~now, don't bother trying to forward it to our
+       // counterparty. They should fail it anyway, but we don't want to bother with
+       // the round-trips or risk them deciding they definitely want the HTLC and
+       // force-closing to ensure they get it if we're offline.
+       // We previously had a much more aggressive check here which tried to ensure
+       // our counterparty receives an HTLC which has *our* risk threshold met on it,
+       // but there is no need to do that, and since we're a bit conservative with our
+       // risk threshold it just results in failing to forward payments.
+       if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+               return Err(("Outgoing CLTV value is too soon", 0x1000 | 14));
+       }
+
+       Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+       use bitcoin::hashes::Hash;
+       use bitcoin::hashes::sha256::Hash as Sha256;
+       use bitcoin::secp256k1::{PublicKey, SecretKey};
+       use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use crate::ln::ChannelId;
+       use crate::ln::channelmanager::RecipientOnionFields;
+       use crate::ln::features::{ChannelFeatures, NodeFeatures};
+       use crate::ln::msgs;
+       use crate::ln::onion_utils::create_payment_onion;
+       use crate::routing::router::{Path, RouteHop};
+       use crate::util::test_utils;
+
+       #[test]
+       fn test_peel_payment_onion() {
+               use super::*;
+               let secp_ctx = Secp256k1::new();
+
+               let bob = crate::sign::KeysManager::new(&[2; 32], 42, 42);
+               let bob_pk = PublicKey::from_secret_key(&secp_ctx, &bob.get_node_secret_key());
+               let charlie = crate::sign::KeysManager::new(&[3; 32], 42, 42);
+               let charlie_pk = PublicKey::from_secret_key(&secp_ctx, &charlie.get_node_secret_key());
+
+               let (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
+                       prng_seed, hops, recipient_amount, pay_secret) = payment_onion_args(bob_pk, charlie_pk);
+
+               let path = Path {
+                       hops,
+                       blinded_tail: None,
+               };
+
+               let (onion, amount_msat, cltv_expiry) = create_payment_onion(
+                       &secp_ctx, &path, &session_priv, total_amt_msat, recipient_onion, cur_height,
+                       &payment_hash, &Some(preimage), prng_seed
+               ).unwrap();
+
+               let msg = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, onion);
+               let logger = test_utils::TestLogger::with_id("bob".to_string());
+
+               let peeled = peel_payment_onion(&msg, &&bob, &&logger, &secp_ctx, cur_height, true)
+                       .map_err(|e| e.msg).unwrap();
+
+               let next_onion = match peeled.routing {
+                       PendingHTLCRouting::Forward { onion_packet, .. } => {
+                               onion_packet
+                       },
+                       _ => panic!("expected a forwarded onion"),
+               };
+
+               let msg2 = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, next_onion);
+               let peeled2 = peel_payment_onion(&msg2, &&charlie, &&logger, &secp_ctx, cur_height, true)
+                       .map_err(|e| e.msg).unwrap();
+
+               match peeled2.routing {
+                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_data, incoming_cltv_expiry, .. } => {
+                               assert_eq!(payment_preimage, preimage);
+                               assert_eq!(peeled2.outgoing_amt_msat, recipient_amount);
+                               assert_eq!(incoming_cltv_expiry, peeled2.outgoing_cltv_value);
+                               let msgs::FinalOnionHopData{total_msat, payment_secret} = payment_data.unwrap();
+                               assert_eq!(total_msat, total_amt_msat);
+                               assert_eq!(payment_secret, pay_secret);
+                       },
+                       _ => panic!("expected a received keysend"),
+               };
+       }
+
+       fn make_update_add_msg(
+               amount_msat: u64, cltv_expiry: u32, payment_hash: PaymentHash,
+               onion_routing_packet: msgs::OnionPacket
+       ) -> msgs::UpdateAddHTLC {
+               msgs::UpdateAddHTLC {
+                       channel_id: ChannelId::from_bytes([0; 32]),
+                       htlc_id: 0,
+                       amount_msat,
+                       cltv_expiry,
+                       payment_hash,
+                       onion_routing_packet,
+                       skimmed_fee_msat: None,
+                       blinding_point: None,
+               }
+       }
+
+       fn payment_onion_args(hop_pk: PublicKey, recipient_pk: PublicKey) -> (
+               SecretKey, u64, u32, RecipientOnionFields, PaymentPreimage, PaymentHash, [u8; 32],
+               Vec<RouteHop>, u64, PaymentSecret,
+       ) {
+               let session_priv_bytes = [42; 32];
+               let session_priv = SecretKey::from_slice(&session_priv_bytes).unwrap();
+               let total_amt_msat = 1000;
+               let cur_height = 1000;
+               let pay_secret = PaymentSecret([99; 32]);
+               let recipient_onion = RecipientOnionFields::secret_only(pay_secret);
+               let preimage_bytes = [43; 32];
+               let preimage = PaymentPreimage(preimage_bytes);
+               let rhash_bytes = Sha256::hash(&preimage_bytes).to_byte_array();
+               let payment_hash = PaymentHash(rhash_bytes);
+               let prng_seed = [44; 32];
+
+               // make a route alice -> bob -> charlie
+               let hop_fee = 1;
+               let recipient_amount = total_amt_msat - hop_fee;
+               let hops = vec![
+                       RouteHop {
+                               pubkey: hop_pk,
+                               fee_msat: hop_fee,
+                               cltv_expiry_delta: 42,
+                               short_channel_id: 1,
+                               node_features: NodeFeatures::empty(),
+                               channel_features: ChannelFeatures::empty(),
+                               maybe_announced_channel: false,
+                       },
+                       RouteHop {
+                               pubkey: recipient_pk,
+                               fee_msat: recipient_amount,
+                               cltv_expiry_delta: 42,
+                               short_channel_id: 2,
+                               node_features: NodeFeatures::empty(),
+                               channel_features: ChannelFeatures::empty(),
+                               maybe_announced_channel: false,
+                       }
+               ];
+
+               (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
+                       prng_seed, hops, recipient_amount, pay_secret)
+       }
+
+}
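
The new `peel_payment_onion` above is the entry point intended for external callers. A minimal
usage sketch, mirroring the module's own test (the `htlc_msg`, `keys_manager`, `logger` and
`best_block_height` bindings are placeholders assumed to exist in the caller's context; the
double references satisfy the `Deref` bounds on the signer and logger):

	let secp_ctx = Secp256k1::new();
	match peel_payment_onion(&htlc_msg, &&keys_manager, &&logger, &secp_ctx, best_block_height, false) {
		Ok(info) => match info.routing {
			PendingHTLCRouting::Forward { short_channel_id, .. } =>
				println!("HTLC should be forwarded over SCID {}", short_channel_id),
			_ => println!("HTLC terminates at this node"),
		},
		Err(e) => println!("failed to peel onion: {}", e.msg),
	}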
index 31f2f7827bcb57c42d7b2a625a842abdaa00ab17..051e78c46eed51f14b4038119217d180ee40aff0 100644 (file)
@@ -242,6 +242,8 @@ pub(super) fn build_onion_payloads(path: &Path, total_msat: u64, mut recipient_o
 /// the hops can be of variable length.
 pub(crate) const ONION_DATA_LEN: usize = 20*65;
 
+/// The BOLT 4 `invalid_onion_blinding` failure code (`BADONION | PERM | 24`).
+pub(super) const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;
+
 #[inline]
 fn shift_slice_right(arr: &mut [u8], amt: usize) {
        for i in (amt..arr.len()).rev() {
@@ -433,11 +435,22 @@ pub(crate) struct DecodedOnionFailure {
        pub(crate) onion_error_data: Option<Vec<u8>>,
 }
 
+/// Note that we always decrypt `packet` in-place here even if the deserialization into
+/// [`msgs::DecodedOnionErrorPacket`] ultimately fails.
+fn decrypt_onion_error_packet(
+       packet: &mut Vec<u8>, shared_secret: SharedSecret
+) -> Result<msgs::DecodedOnionErrorPacket, msgs::DecodeError> {
+       let ammag = gen_ammag_from_shared_secret(shared_secret.as_ref());
+       let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
+       chacha.process_in_place(packet);
+       msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(packet))
+}
+
 /// Process failure we got back from upstream on a payment we sent (implying htlc_source is an
 /// OutboundRoute).
 #[inline]
 pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
-       secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>
+       secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource, mut encrypted_packet: Vec<u8>
 ) -> DecodedOnionFailure where L::Target: Logger {
        let (path, session_priv, first_hop_htlc_msat) = if let &HTLCSource::OutboundRoute {
                ref path, ref session_priv, ref first_hop_htlc_msat, ..
@@ -491,8 +504,21 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
                                Some(hop) => hop,
                                None => {
                                        // The failing hop is within a multi-hop blinded path.
-                                       error_code_ret = Some(BADONION | PERM | 24); // invalid_onion_blinding
-                                       error_packet_ret = Some(vec![0; 32]);
+                                       #[cfg(not(test))] {
+                                               error_code_ret = Some(BADONION | PERM | 24); // invalid_onion_blinding
+                                               error_packet_ret = Some(vec![0; 32]);
+                                       }
+                                       #[cfg(test)] {
+                                               // Actually parse the onion error data in tests so we can check that blinded hops fail
+                                               // back correctly.
+                                               let err_packet = decrypt_onion_error_packet(
+                                                       &mut encrypted_packet, shared_secret
+                                               ).unwrap();
+                                               error_code_ret =
+                                                       Some(u16::from_be_bytes(err_packet.failuremsg.get(0..2).unwrap().try_into().unwrap()));
+                                               error_packet_ret = Some(err_packet.failuremsg[2..].to_vec());
+                                       }
+
                                        res = Some(FailureLearnings {
                                                network_update: None, short_channel_id: None, payment_failed_permanently: false
                                        });
@@ -504,15 +530,7 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
                let amt_to_forward = htlc_msat - route_hop.fee_msat;
                htlc_msat = amt_to_forward;
 
-               let ammag = gen_ammag_from_shared_secret(shared_secret.as_ref());
-
-               let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
-               decryption_tmp.resize(packet_decrypted.len(), 0);
-               let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
-               chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
-               packet_decrypted = decryption_tmp;
-
-               let err_packet = match msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
+               let err_packet = match decrypt_onion_error_packet(&mut encrypted_packet, shared_secret) {
                        Ok(p) => p,
                        Err(_) => return
                };
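
With `decrypt_onion_error_packet` in place, each iteration of the per-hop loop above peels one
encryption layer off `encrypted_packet` without copying into a scratch buffer first. A simplified
sketch of that peeling loop, assuming the per-hop shared secrets have already been re-derived
from the path and session key:

	for shared_secret in hop_shared_secrets {
		// Undo this hop's ChaCha20 pass, keyed by its `ammag`, in place.
		let ammag = gen_ammag_from_shared_secret(shared_secret.as_ref());
		ChaCha20::new(&ammag, &[0u8; 8]).process_in_place(&mut encrypted_packet);
		if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&encrypted_packet)) {
			// An HMAC check keyed by this hop's `um` secret would confirm whether
			// this hop is the one that generated the failure.
			let _ = err_packet;
		}
	}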
@@ -722,9 +740,11 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
 }
 
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(PartialEq))]
 pub(super) struct HTLCFailReason(HTLCFailReasonRepr);
 
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(PartialEq))]
 enum HTLCFailReasonRepr {
        LightningError {
                err: msgs::OnionErrorPacket,
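
Deriving `PartialEq` only under `cfg(test)` keeps the public API surface unchanged while letting
tests compare failure reasons directly. An illustrative assertion (not taken from the patch):

	let a = HTLCFailReason::reason(0x4000 | 15, Vec::new());
	let b = HTLCFailReason::reason(0x4000 | 15, Vec::new());
	assert!(a == b);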
index 3949b97e0d03019a5f79ad4f293d0d450a9d970a..f061772890bf4c168a1735ce1a8c1527ae3431d0 100644 (file)
@@ -35,7 +35,7 @@ use crate::onion_message::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
 use crate::onion_message::{CustomOnionMessageHandler, OffersMessage, OffersMessageHandler, OnionMessageContents, PendingOnionMessage};
 use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, NodeAlias};
 use crate::util::atomic_counter::AtomicCounter;
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, WithContext};
 use crate::util::string::PrintableString;
 
 use crate::prelude::*;
@@ -1253,10 +1253,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Append a message to a peer's pending outbound/write buffer
        fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
+               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                if is_gossip_msg(message.type_id()) {
-                       log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
+                       log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
                } else {
-                       log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0))
+                       log_trace!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0))
                }
                peer.msgs_sent_since_pong += 1;
                peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(message));
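
This and the following hunks all apply the same pattern: wrap `self.logger` in a `WithContext`
scoped to the peer (and, where one is known, the channel) so that each line carries that metadata.
The general shape, with `peer_pk` and `chan_id` as placeholder bindings:

	let logger = WithContext::from(&self.logger, Some(peer_pk), Some(chan_id));
	log_debug!(logger, "this line is attributed to peer {}", log_pubkey!(peer_pk));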
@@ -1355,9 +1356,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                                macro_rules! insert_node_id {
                                                        () => {
+                                                               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                                                match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
                                                                        hash_map::Entry::Occupied(e) => {
-                                                                               log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
+                                                                               log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
                                                                                peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
                                                                                // Check that the peers map is consistent with the
                                                                                // node_id_to_descriptor map, as this has been broken
@@ -1366,7 +1368,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                return Err(PeerHandleError { })
                                                                        },
                                                                        hash_map::Entry::Vacant(entry) => {
-                                                                               log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
+                                                                               log_debug!(logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
                                                                                entry.insert(peer_descriptor.clone())
                                                                        },
                                                                };
@@ -1434,6 +1436,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peer.pending_read_buffer.resize(18, 0);
                                                                        peer.pending_read_is_header = true;
 
+                                                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                                                        let message = match message_result {
                                                                                Ok(x) => x,
                                                                                Err(e) => {
@@ -1443,16 +1446,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                // the messages enqueued here to not actually
                                                                                                // be sent before the peer is disconnected.
                                                                                                (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => {
-                                                                                                       log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
+                                                                                                       log_gossip!(logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
                                                                                                        continue;
                                                                                                }
                                                                                                (msgs::DecodeError::UnsupportedCompression, _) => {
-                                                                                                       log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
+                                                                                                       log_gossip!(logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() });
                                                                                                        continue;
                                                                                                }
                                                                                                (_, Some(ty)) if is_gossip_msg(ty) => {
-                                                                                                       log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message");
+                                                                                                       log_gossip!(logger, "Got an invalid value while deserializing a gossip message");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage {
                                                                                                                channel_id: ChannelId::new_zero(),
                                                                                                                data: format!("Unreadable/bogus gossip message of type {}", ty),
@@ -1460,16 +1463,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                        continue;
                                                                                                }
                                                                                                (msgs::DecodeError::UnknownRequiredFeature, _) => {
-                                                                                                       log_debug!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
+                                                                                                       log_debug!(logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
                                                                                                        return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
                                                                                                (msgs::DecodeError::InvalidValue, _) => {
-                                                                                                       log_debug!(self.logger, "Got an invalid value while deserializing message");
+                                                                                                       log_debug!(logger, "Got an invalid value while deserializing message");
                                                                                                        return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::ShortRead, _) => {
-                                                                                                       log_debug!(self.logger, "Deserialization failed due to shortness of message");
+                                                                                                       log_debug!(logger, "Deserialization failed due to shortness of message");
                                                                                                        return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
@@ -1519,6 +1522,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                message: wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>
        ) -> Result<Option<wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
                let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages").0;
+               let logger = WithContext::from(&self.logger, Some(their_node_id), None);
                peer_lock.received_message_since_timer_tick = true;
 
                // Need an Init as first message
@@ -1536,7 +1540,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                }
                                        }
                                        if !have_compatible_chains {
-                                               log_debug!(self.logger, "Peer does not support any of our supported chains");
+                                               log_debug!(logger, "Peer does not support any of our supported chains");
                                                return Err(PeerHandleError { }.into());
                                        }
                                }
@@ -1544,12 +1548,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                        let our_features = self.init_features(&their_node_id);
                        if msg.features.requires_unknown_bits_from(&our_features) {
-                               log_debug!(self.logger, "Peer requires features unknown to us");
+                               log_debug!(logger, "Peer requires features unknown to us");
                                return Err(PeerHandleError { }.into());
                        }
 
                        if our_features.requires_unknown_bits_from(&msg.features) {
-                               log_debug!(self.logger, "We require features unknown to our peer");
+                               log_debug!(logger, "We require features unknown to our peer");
                                return Err(PeerHandleError { }.into());
                        }
 
@@ -1557,7 +1561,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                return Err(PeerHandleError { }.into());
                        }
 
-                       log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
+                       log_info!(logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
 
                        // For peers not supporting gossip queries start sync now, otherwise wait until we receive a filter.
                        if msg.features.initial_routing_sync() && !msg.features.supports_gossip_queries() {
@@ -1565,22 +1569,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        }
 
                        if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
-                               log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+                               log_debug!(logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
                        if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
-                               log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+                               log_debug!(logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
                        if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
-                               log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+                               log_debug!(logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
 
                        peer_lock.their_features = Some(msg.features);
                        return Ok(None);
                } else if peer_lock.their_features.is_none() {
-                       log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
+                       log_debug!(logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
                        return Err(PeerHandleError { }.into());
                }
 
@@ -1602,9 +1606,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                mem::drop(peer_lock);
 
                if is_gossip_msg(message.type_id()) {
-                       log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+                       log_gossip!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
                } else {
-                       log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+                       log_trace!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
                }
 
                let mut should_forward = None;
@@ -1618,14 +1622,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // Handled above
                        },
                        wire::Message::Error(msg) => {
-                               log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
+                               log_debug!(logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
                                self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
                                if msg.channel_id.is_zero() {
                                        return Err(PeerHandleError { }.into());
                                }
                        },
                        wire::Message::Warning(msg) => {
-                               log_debug!(self.logger, "Got warning message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
+                               log_debug!(logger, "Got warning message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
                        },
 
                        wire::Message::Ping(msg) => {
@@ -1789,11 +1793,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                        // Unknown messages:
                        wire::Message::Unknown(type_id) if message.is_even() => {
-                               log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
+                               log_debug!(logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
                                return Err(PeerHandleError { }.into());
                        },
                        wire::Message::Unknown(type_id) => {
-                               log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
+                               log_trace!(logger, "Received unknown odd message of type {}, ignoring", type_id);
                        },
                        wire::Message::Custom(custom) => {
                                self.message_handler.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
@@ -1810,6 +1814,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
+                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
@@ -1817,7 +1822,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
-                                               log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                                               log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
                                        if let Some((_, their_node_id)) = peer.their_node_id {
@@ -1837,6 +1842,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
+                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
@@ -1844,7 +1850,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
-                                               log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                                               log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
                                        if let Some((_, their_node_id)) = peer.their_node_id {
@@ -1864,6 +1870,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
+                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
@@ -1871,7 +1878,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
-                                               log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                                               log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
                                        if except_node.is_some() && peer.their_node_id.as_ref().map(|(pk, _)| pk) == except_node {
@@ -1953,31 +1960,31 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                for event in events_generated.drain(..) {
                                        match event {
                                                MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id,
                                                                        log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
@@ -1986,13 +1993,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendChannelReady event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
@@ -2022,67 +2029,67 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxComplete event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAbort event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
-                                                       log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(commitment_signed.channel_id)), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        update_add_htlcs.len(),
                                                                        update_fulfill_htlcs.len(),
@@ -2107,31 +2114,31 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *peer, commitment_signed);
                                                },
                                                MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling Shutdown event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
-                                                       log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
                                                                        log_pubkey!(node_id),
                                                                        msg.contents.short_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
@@ -2169,18 +2176,19 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        }
                                                },
                                                MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
-                                                       log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+                                                       log_trace!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id), msg.contents.short_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::HandleError { node_id, action } => {
+                                                       let logger = WithContext::from(&self.logger, Some(node_id), None);
                                                        match action {
                                                                msgs::ErrorAction::DisconnectPeer { msg } => {
                                                                        if let Some(msg) = msg.as_ref() {
-                                                                               log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+                                                                               log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                                        log_pubkey!(node_id), msg.data);
                                                                        } else {
-                                                                               log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {}",
+                                                                               log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {}",
                                                                                        log_pubkey!(node_id));
                                                                        }
                                                                        // We do not have the peers write lock, so we just store that we're
@@ -2190,7 +2198,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peers_to_disconnect.insert(node_id, msg);
                                                                },
                                                                msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {
-                                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+                                                                       log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                                log_pubkey!(node_id), msg.data);
                                                                        // We do not have the peers write lock, so we just store that we're
                                                                        // about to disconnect the peer and do it after we finish
@@ -2198,20 +2206,20 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
                                                                },
                                                                msgs::ErrorAction::IgnoreAndLog(level) => {
-                                                                       log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+                                                                       log_given_level!(logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
                                                                },
                                                                msgs::ErrorAction::IgnoreDuplicateGossip => {},
                                                                msgs::ErrorAction::IgnoreError => {
-                                                                               log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+                                                                               log_debug!(logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
                                                                        },
                                                                msgs::ErrorAction::SendErrorMessage { ref msg } => {
-                                                                       log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
+                                                                       log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
                                                                                        log_pubkey!(node_id),
                                                                                        msg.data);
                                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), msg);
                                                                },
                                                                msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
-                                                                       log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
+                                                                       log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
                                                                                        log_pubkey!(node_id),
                                                                                        msg.data);
                                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), msg);
@@ -2225,7 +2233,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
-                                                       log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+                                                       log_gossip!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
                                                                log_pubkey!(node_id),
                                                                msg.short_channel_ids.len(),
                                                                msg.first_blocknum,
@@ -2299,7 +2307,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                debug_assert!(peer.their_node_id.is_some());
                if let Some((node_id, _)) = peer.their_node_id {
-                       log_trace!(self.logger, "Disconnecting peer with id {} due to {}", node_id, reason);
+                       log_trace!(WithContext::from(&self.logger, Some(node_id), None), "Disconnecting peer with id {} due to {}", node_id, reason);
                        self.message_handler.chan_handler.peer_disconnected(&node_id);
                        self.message_handler.onion_message_handler.peer_disconnected(&node_id);
                }
@@ -2318,7 +2326,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        Some(peer_lock) => {
                                let peer = peer_lock.lock().unwrap();
                                if let Some((node_id, _)) = peer.their_node_id {
-                                       log_trace!(self.logger, "Handling disconnection of peer {}", log_pubkey!(node_id));
+                                       log_trace!(WithContext::from(&self.logger, Some(node_id), None), "Handling disconnection of peer {}", log_pubkey!(node_id));
                                        let removed = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
                                        debug_assert!(removed.is_some(), "descriptor maps should be consistent");
                                        if !peer.handshake_complete() { return; }
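
The bulk of the peer_handler.rs churn above replaces bare `self.logger` calls with a `WithContext` wrapper that pins the counterparty's node id (and, where known, a channel id) onto every record before it reaches the underlying logger. A minimal sketch of how such a decorator can be structured, assuming a by-value `Record` as introduced in this patch; the `Record` field set and the raw-byte key/id types are assumptions for illustration, not LDK's real definitions:

```rust
// Illustrative sketch only: `Record`'s fields and the key/id types are
// assumptions for this example, not LDK's exact types.
struct Record {
    peer_id: Option<[u8; 33]>,
    channel_id: Option<[u8; 32]>,
    args: String,
}

trait Logger {
    // Taking `Record` by value (as this patch does) lets wrappers amend it
    // before forwarding, without forcing a clone at every call site.
    fn log(&self, record: Record);
}

struct WithContext<'a, L: Logger> {
    logger: &'a L,
    peer_id: Option<[u8; 33]>,
    channel_id: Option<[u8; 32]>,
}

impl<'a, L: Logger> WithContext<'a, L> {
    fn from(logger: &'a L, peer_id: Option<[u8; 33]>, channel_id: Option<[u8; 32]>) -> Self {
        WithContext { logger, peer_id, channel_id }
    }
}

impl<'a, L: Logger> Logger for WithContext<'a, L> {
    fn log(&self, mut record: Record) {
        // Fill in whatever context the call site did not already attach.
        if record.peer_id.is_none() { record.peer_id = self.peer_id; }
        if record.channel_id.is_none() { record.channel_id = self.channel_id; }
        self.logger.log(record);
    }
}
```

With this shape, a call like `log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), ...)` tags every event-handling log line with the peer and channel it concerns, which is exactly the pattern repeated through the `MessageSendEvent` match arms above.
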
index 052e3eb6e5d98eaace5bd39b71061bd0cc3b6b63..223aa5dbac30ba64604992d6b272722ceb3ed1c0 100644 (file)
@@ -15,9 +15,10 @@ use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, ChannelMonitor};
 use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
-use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, Retry, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
+use crate::routing::router::{RouteParameters, PaymentParameters};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
@@ -493,7 +494,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        assert!(found_err);
 }
 
-fn do_test_data_loss_protect(reconnect_panicing: bool) {
+#[cfg(feature = "std")]
+fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) {
        // When we get a data_loss_protect proving we're behind, we immediately panic as the
        // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
        // panic message informs the user they should force-close without broadcasting, which is tested
@@ -517,8 +519,38 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        let previous_node_state = nodes[0].node.encode();
        let previous_chain_monitor_state = get_monitor!(nodes[0], chan.2).encode();
 
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       assert!(!substantially_old || !not_stale, "substantially_old and not_stale together don't make sense");
+       if not_stale || !substantially_old {
+               // Previously, we'd only hit the data_loss_protect assertion if we had a state that
+               // was revoked at least two revocations ago, not the latest revocation. Here, we use
+               // `not_stale` to test the boundary condition.
+               let pay_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(), 100, false);
+               let route_params = RouteParameters::from_payment_params_and_value(pay_params, 40000);
+               nodes[0].node.send_spontaneous_payment_with_retry(None, RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), route_params, Retry::Attempts(0)).unwrap();
+               check_added_monitors(&nodes[0], 1);
+               let update_add_commit = SendEvent::from_node(&nodes[0]);
+
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_commit.msgs[0]);
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_add_commit.commitment_msg);
+               check_added_monitors(&nodes[1], 1);
+               let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+               check_added_monitors(&nodes[0], 1);
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+               if !not_stale {
+                       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+                       check_added_monitors(&nodes[0], 1);
+                       // A now revokes their original state, at which point reconnect should panic
+                       let raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+                       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+                       check_added_monitors(&nodes[1], 1);
+                       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               }
+       } else {
+               send_payment(&nodes[0], &[&nodes[1]], 8000000);
+               send_payment(&nodes[0], &[&nodes[1]], 8000000);
+       }
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
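
The partial commitment dance above deliberately stops early: A handles B's `revoke_and_ack` (and, in the `!not_stale` case, also B's `commitment_signed` plus one more revocation round), leaving A zero or one revoked states behind rather than the two-revocation gap the old test required. The reestablish behaviour this exercises reduces to two comparisons; a hedged, self-contained reduction follows (the message strings match the assertions later in this test, but the single-function shape is illustrative, not LDK's real handler, which inspects different `channel_reestablish` fields for each case):

```rust
// Hedged reduction of the two reestablish comparisons this test exercises.
// Collapsing them into one function over a single counter is purely for
// illustration.
fn check_reestablish(received: u64, expected: u64) -> Result<(), String> {
    if received < expected {
        // Counterparty is behind us: warn, but never force-close on their behalf.
        return Err(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", received, expected));
    }
    if received > expected {
        // Counterparty proves a state ahead of ours: we are the stale side.
        return Err(format!("Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)", received, expected));
    }
    Ok(())
}
```
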
@@ -535,89 +567,131 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
-               // Check we close channel detecting A is fallen-behind
-               // Check that we sent the warning message when we detected that A has fallen behind,
-               // and give the possibility for A to recover from the warning.
+               // If A has fallen behind substantially, B should send it a message letting it know
+               // that.
                nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-               let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
-               assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+               let reestablish_msg;
+               if substantially_old {
+                       let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction: 0 (received) vs 4 (expected)".to_owned();
+
+                       let warn_reestablish = nodes[1].node.get_and_clear_pending_msg_events();
+                       assert_eq!(warn_reestablish.len(), 2);
+                       match warn_reestablish[1] {
+                               MessageSendEvent::HandleError { action: ErrorAction::SendWarningMessage { ref msg, .. }, .. } => {
+                                       assert_eq!(msg.data, warn_msg);
+                               },
+                               _ => panic!("Unexpected events: {:?}", warn_reestablish),
+                       }
+                       reestablish_msg = match &warn_reestablish[0] {
+                               MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+                               _ => panic!("Unexpected events: {:?}", warn_reestablish),
+                       };
+               } else {
+                       let msgs = nodes[1].node.get_and_clear_pending_msg_events();
+                       assert!(msgs.len() >= 4);
+                       match msgs.last() {
+                               Some(MessageSendEvent::SendChannelUpdate { .. }) => {},
+                               _ => panic!("Unexpected events: {:?}", msgs),
+                       }
+                       assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::SendRevokeAndACK { .. })));
+                       assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::UpdateHTLCs { .. })));
+                       reestablish_msg = match &msgs[0] {
+                               MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+                               _ => panic!("Unexpected events: {:?}", msgs),
+                       };
+               }
 
                {
-                       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-                       // The node B should not broadcast the transaction to force close the channel!
+                       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+                                       // Node B should never force-close the channel.
                        assert!(node_txn.is_empty());
                }
 
-               let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                // Check A panics upon seeing proof it has fallen behind.
-               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
-               return; // By this point we should have panic'ed!
-       }
+               let reconnect_res = std::panic::catch_unwind(|| {
+                       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_msg);
+               });
+               if not_stale {
+                       assert!(reconnect_res.is_ok());
+                       // At this point A gets confused because B expects a commitment state newer than A
+                       // has sent, but not a newer revocation secret, so A just (correctly) closes.
+                       check_closed_broadcast(&nodes[0], 1, true);
+                       check_added_monitors(&nodes[0], 1);
+                       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError {
+                               err: "Peer attempted to reestablish channel with a future remote commitment transaction: 2 (received) vs 1 (expected)".to_owned()
+                       }, [nodes[1].node.get_our_node_id()], 1000000);
+               } else {
+                       assert!(reconnect_res.is_err());
+                       // Skip the `Drop` handler for `Node`s as some may be in an invalid (panicked) state.
+                       std::mem::forget(nodes);
+               }
+       } else {
+               assert!(!not_stale, "We only care about the stale case when not testing panicking");
 
-       nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
-       {
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 0);
-       }
+               nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
+               {
+                       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+                       assert_eq!(node_txn.len(), 0);
+               }
+
+               for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+                       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+                       } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+                               match action {
+                                       &ErrorAction::DisconnectPeer { ref msg } => {
+                                               assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+                                       },
+                                       _ => panic!("Unexpected event!"),
+                               }
+                       } else {
+                               panic!("Unexpected event {:?}", msg)
+                       }
+               }
 
-       for msg in nodes[0].node.get_and_clear_pending_msg_events() {
-               if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
-               } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+               // After the warning message sent by B, we should not be able to
+               // use the channel or successfully reconnect to it.
+               assert!(nodes[0].node.list_usable_channels().is_empty());
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
+               let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+               let mut err_msgs_0 = Vec::with_capacity(1);
+               if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
                        match action {
-                               &ErrorAction::DisconnectPeer { ref msg } => {
-                                       assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+                               &ErrorAction::SendErrorMessage { ref msg } => {
+                                       assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
+                                       err_msgs_0.push(msg.clone());
                                },
                                _ => panic!("Unexpected event!"),
                        }
                } else {
-                       panic!("Unexpected event {:?}", msg)
+                       panic!("Unexpected event!");
                }
+               assert_eq!(err_msgs_0.len(), 1);
+               nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+               assert!(nodes[1].node.list_usable_channels().is_empty());
+               check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
+                       , [nodes[0].node.get_our_node_id()], 1000000);
+               check_closed_broadcast!(nodes[1], false);
        }
-
-       // after the warning message sent by B, we should not able to
-       // use the channel, or reconnect with success to the channel.
-       assert!(nodes[0].node.list_usable_channels().is_empty());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
-               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
-       }, true).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
-       }, false).unwrap();
-       let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
-
-       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
-       let mut err_msgs_0 = Vec::with_capacity(1);
-       if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
-               match action {
-                       &ErrorAction::SendErrorMessage { ref msg } => {
-                               assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
-                               err_msgs_0.push(msg.clone());
-                       },
-                       _ => panic!("Unexpected event!"),
-               }
-       } else {
-               panic!("Unexpected event!");
-       }
-       assert_eq!(err_msgs_0.len(), 1);
-       nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
-       assert!(nodes[1].node.list_usable_channels().is_empty());
-       check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
-               , [nodes[0].node.get_our_node_id()], 1000000);
-       check_closed_broadcast!(nodes[1], false);
-}
-
-#[test]
-#[should_panic]
-fn test_data_loss_protect_showing_stale_state_panics() {
-       do_test_data_loss_protect(true);
 }
 
 #[test]
-fn test_force_close_without_broadcast() {
-       do_test_data_loss_protect(false);
+#[cfg(feature = "std")]
+fn test_data_loss_protect() {
+       do_test_data_loss_protect(true, false, true);
+       do_test_data_loss_protect(true, true, false);
+       do_test_data_loss_protect(true, false, false);
+       do_test_data_loss_protect(false, true, false);
+       do_test_data_loss_protect(false, false, false);
 }
 
 #[test]
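
One mechanical detail worth noting in the rewrite above: the panicking arm no longer relies on `#[should_panic]` (which would have ended the whole parameterized test at the first panic) but wraps the call in `std::panic::catch_unwind` and then `std::mem::forget`s the test nodes, so `Drop` impls that may themselves assert never run against a panicked state. A sketch of the generic shape of that trick, under the usual unwind-safety bounds:

```rust
use std::panic::{catch_unwind, RefUnwindSafe, UnwindSafe};

// Assert that `f(&state)` panics, then leak `state` instead of dropping it,
// since the panic may have left it in a shape whose destructors would fire
// further assertions. Names here are illustrative, not from the patch.
fn assert_panics_then_leak<T: RefUnwindSafe>(state: T, f: impl FnOnce(&T) + UnwindSafe) {
    let res = catch_unwind(|| f(&state));
    assert!(res.is_err(), "expected the closure to panic");
    // Skip all Drop handlers on the (possibly inconsistent) state.
    std::mem::forget(state);
}
```
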
index bc7c013771fb0e049d23283d6fce6f533d1d4ae1..308211d0b370285e509c6885a60de084ba214d06 100644 (file)
@@ -13,10 +13,11 @@ use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::ChannelMonitorUpdateStatus;
 use crate::chain::transaction::OutPoint;
 use crate::events::{MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason};
-use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, ChannelShutdownState, ChannelDetails};
+use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry, ChannelShutdownState, ChannelDetails};
 use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
+use crate::ln::onion_utils::INVALID_ONION_BLINDING;
 use crate::ln::script::ShutdownScript;
 use crate::util::test_utils;
 use crate::util::test_utils::OnGetShutdownScriptpubkey;
@@ -401,6 +402,11 @@ fn updates_shutdown_wait() {
 
 #[test]
 fn htlc_fail_async_shutdown() {
+       do_htlc_fail_async_shutdown(true);
+       do_htlc_fail_async_shutdown(false);
+}
+
+fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
        // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
@@ -409,9 +415,20 @@ fn htlc_fail_async_shutdown() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
-       nodes[0].node.send_payment_with_route(&route, our_payment_hash,
-               RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+       let amt_msat = 100000;
+       let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = if blinded_recipient {
+               crate::ln::blinded_payment_tests::get_blinded_route_parameters(
+                       amt_msat, our_payment_secret,
+                       nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_2.0.contents],
+                       &chanmon_cfgs[2].keys_manager)
+       } else {
+               RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV), amt_msat)
+       };
+       nodes[0].node.send_payment(our_payment_hash,
+               RecipientOnionFields::secret_only(our_payment_secret),
+               PaymentId(our_payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
        check_added_monitors!(nodes[0], 1);
        let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        assert_eq!(updates.update_add_htlcs.len(), 1);
@@ -441,7 +458,12 @@ fn htlc_fail_async_shutdown() {
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
 
-       expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
+       if blinded_recipient {
+               expect_payment_failed_conditions(&nodes[0], our_payment_hash, false,
+                       PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+       } else {
+               expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
+       }
 
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 1);
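
The shutdown test now runs in two flavors because failures behave differently once a blinded path is involved: hops inside a blinded path deliberately collapse every failure into the same opaque `invalid_onion_blinding` error with zeroed failure data, so the sender learns nothing about which hop failed, whereas the unblinded case still surfaces the failing channel via a `channel_update`. A sketch of the constant the blinded assertion checks; the `BADONION | PERM | 24` composition matches my reading of BOLT 4's route-blinding rules, so treat the exact bit layout as an assumption to verify against `onion_utils::INVALID_ONION_BLINDING`:

```rust
// Sketch of the failure a blinded hop returns: a constant code plus zeroed
// data, so intermediate hops are indistinguishable to the sender.
const BADONION: u16 = 0x8000;
const PERM: u16 = 0x4000;
const INVALID_ONION_BLINDING: u16 = BADONION | PERM | 24;

fn blinded_hop_failure() -> (u16, [u8; 32]) {
    (INVALID_ONION_BLINDING, [0u8; 32])
}
```

This is why the blinded branch asserts `expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])` while the unblinded branch still expects a channel-specific update for `chan_2`.
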
index 0e68d09143c00d251c69ee251162960b05ec3bed..0e88b1932df826ea922c7f5f215b1c76fce1aa77 100644 (file)
@@ -71,7 +71,7 @@ use crate::prelude::*;
 /// # use std::sync::Arc;
 /// # struct FakeLogger;
 /// # impl Logger for FakeLogger {
-/// #     fn log(&self, record: &Record) { unimplemented!() }
+/// #     fn log(&self, record: Record) { unimplemented!() }
 /// # }
 /// # struct FakeMessageRouter {}
 /// # impl MessageRouter for FakeMessageRouter {
index a2d9f51a38e6a4edc2a8e64c35798c5184e1c5ca..395731502cb06b2b8dce9667674514703f126b16 100644 (file)
@@ -1795,12 +1795,12 @@ where L::Target: Logger {
                                        let payment_failed_on_this_channel = scid_opt.map_or(false,
                                                |scid| payment_params.previously_failed_channels.contains(&scid));
 
-                                       let should_log_candidate = match $candidate {
-                                               CandidateRouteHop::FirstHop { .. } => true,
-                                               CandidateRouteHop::PrivateHop { .. } => true,
-                                               CandidateRouteHop::Blinded { .. } => true,
-                                               CandidateRouteHop::OneHopBlinded { .. } => true,
-                                               _ => false,
+                                       let (should_log_candidate, first_hop_details) = match $candidate {
+                                               CandidateRouteHop::FirstHop { details } => (true, Some(details)),
+                                               CandidateRouteHop::PrivateHop { .. } => (true, None),
+                                               CandidateRouteHop::Blinded { .. } => (true, None),
+                                               CandidateRouteHop::OneHopBlinded { .. } => (true, None),
+                                               _ => (false, None),
                                        };
 
                                        // If HTLC minimum is larger than the amount we're going to transfer, we shouldn't
@@ -1810,6 +1810,13 @@ where L::Target: Logger {
                                        if !contributes_sufficient_value {
                                                if should_log_candidate {
                                                        log_trace!(logger, "Ignoring {} due to insufficient value contribution.", LoggedCandidateHop(&$candidate));
+
+                                                       if let Some(details) = first_hop_details {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate next_outbound_htlc_limit_msat: {}",
+                                                                       details.next_outbound_htlc_limit_msat,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_value_contribution += 1;
                                        } else if exceeds_max_path_length {
@@ -1820,6 +1827,14 @@ where L::Target: Logger {
                                        } else if exceeds_cltv_delta_limit {
                                                if should_log_candidate {
                                                        log_trace!(logger, "Ignoring {} due to exceeding CLTV delta limit.", LoggedCandidateHop(&$candidate));
+
+                                                       if first_hop_details.is_some() {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate cltv_expiry_delta: {}. Limit: {}",
+                                                                       hop_total_cltv_delta,
+                                                                       max_total_cltv_expiry_delta,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_cltv_delta_limit += 1;
                                        } else if payment_failed_on_this_channel {
@@ -1832,6 +1847,13 @@ where L::Target: Logger {
                                                        log_trace!(logger,
                                                                "Ignoring {} to avoid overpaying to meet htlc_minimum_msat limit.",
                                                                LoggedCandidateHop(&$candidate));
+
+                                                       if let Some(details) = first_hop_details {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate next_outbound_htlc_minimum_msat: {}",
+                                                                       details.next_outbound_htlc_minimum_msat,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_avoid_overpayment += 1;
                                                hit_minimum_limit = true;
@@ -1893,6 +1915,14 @@ where L::Target: Logger {
                                                        if total_fee_msat > max_total_routing_fee_msat {
                                                                if should_log_candidate {
                                                                        log_trace!(logger, "Ignoring {} due to exceeding max total routing fee limit.", LoggedCandidateHop(&$candidate));
+
+                                                                       if first_hop_details.is_some() {
+                                                                               log_trace!(logger,
+                                                                                       "First hop candidate routing fee: {}. Limit: {}",
+                                                                                       total_fee_msat,
+                                                                                       max_total_routing_fee_msat,
+                                                                               );
+                                                                       }
                                                                }
                                                                num_ignored_total_fee_limit += 1;
                                                        } else {
@@ -1988,6 +2018,13 @@ where L::Target: Logger {
                                                        log_trace!(logger,
                                                                "Ignoring {} due to its htlc_minimum_msat limit.",
                                                                LoggedCandidateHop(&$candidate));
+
+                                                       if let Some(details) = first_hop_details {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate next_outbound_htlc_minimum_msat: {}",
+                                                                       details.next_outbound_htlc_minimum_msat,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_htlc_minimum_msat_limit += 1;
                                        }
@@ -8116,7 +8153,7 @@ pub mod benches {
 
        struct DummyLogger {}
        impl Logger for DummyLogger {
-               fn log(&self, _record: &Record) {}
+               fn log(&self, _record: Record) {}
        }
 
        pub fn generate_routes_with_zero_penalty_scorer(bench: &mut Criterion) {
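Each of the router hunks above follows the same shape: destructure the candidate once to pull out the first-hop `details`, then pair every "Ignoring ..." trace line with a second line carrying the relevant first-hop limit. A standalone toy sketch of that shape (the `Candidate` enum here is illustrative, not LDK's `CandidateRouteHop`):

    // Toy stand-in for CandidateRouteHop; only the first-hop variant carries details.
    enum Candidate { FirstHop { next_outbound_htlc_limit_msat: u64 }, Private, Public }

    fn log_ignored(candidate: &Candidate, reason: &str) {
        let (should_log, first_hop_limit) = match candidate {
            Candidate::FirstHop { next_outbound_htlc_limit_msat } =>
                (true, Some(*next_outbound_htlc_limit_msat)),
            Candidate::Private => (true, None),
            Candidate::Public => (false, None),
        };
        if should_log {
            println!("Ignoring candidate due to {}.", reason);
            if let Some(limit) = first_hop_limit {
                println!("First hop candidate next_outbound_htlc_limit_msat: {}", limit);
            }
        }
    }

    fn main() {
        log_ignored(&Candidate::FirstHop { next_outbound_htlc_limit_msat: 1_000 },
            "insufficient value contribution");
    }
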
index 9c907c3f7fe4bd38d7526b9d10871769ea537d27..de40b295e49ea88b67979b22f60900c0d57ce676 100644 (file)
@@ -26,7 +26,7 @@
 //! #
 //! # struct FakeLogger {};
 //! # impl Logger for FakeLogger {
-//! #     fn log(&self, record: &Record) { unimplemented!() }
+//! #     fn log(&self, record: Record) { unimplemented!() }
 //! # }
 //! # fn find_scored_route(payer: PublicKey, route_params: RouteParameters, network_graph: NetworkGraph<&FakeLogger>) {
 //! # let logger = FakeLogger {};
index 52855050b3d9408b0c694467ef48bd0c95c1def8..2e98213c1825f2c0af8802bcc0838c2dc4d7af55 100644 (file)
@@ -42,11 +42,6 @@ pub trait EcdsaChannelSigner: ChannelSigner {
                inbound_htlc_preimages: Vec<PaymentPreimage>,
                outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>,
        ) -> Result<(Signature, Vec<Signature>), ()>;
-       /// Validate the counterparty's revocation.
-       ///
-       /// This is required in order for the signer to make sure that the state has moved
-       /// forward and it is safe to sign the next counterparty commitment.
-       fn validate_counterparty_revocation(&self, idx: u64, secret: &SecretKey) -> Result<(), ()>;
        /// Creates a signature for a holder's commitment transaction.
        ///
        /// This will be called
index 2e7e39cc9cb9ccc82db63fd1f6e1af991681b758..4e418f049bbc9867a7a2a7920706e17c135ed23e 100644 (file)
@@ -57,7 +57,7 @@ use core::convert::TryInto;
 use core::ops::Deref;
 use core::sync::atomic::{AtomicUsize, Ordering};
 #[cfg(taproot)]
-use musig2::types::{PartialSignature, PublicNonce, SecretNonce};
+use musig2::types::{PartialSignature, PublicNonce};
 use crate::io::{self, Error};
 use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
@@ -603,6 +603,12 @@ pub trait ChannelSigner {
        fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction,
                outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()>;
 
+       /// Validate the counterparty's revocation.
+       ///
+       /// This is required in order for the signer to make sure that the state has moved
+       /// forward and it is safe to sign the next counterparty commitment.
+       fn validate_counterparty_revocation(&self, idx: u64, secret: &SecretKey) -> Result<(), ()>;
+
        /// Returns the holder's channel public keys and basepoints.
        fn pubkeys(&self) -> &ChannelPublicKeys;
 
@@ -1084,6 +1090,10 @@ impl ChannelSigner for InMemorySigner {
                Ok(())
        }
 
+       fn validate_counterparty_revocation(&self, _idx: u64, _secret: &SecretKey) -> Result<(), ()> {
+               Ok(())
+       }
+
        fn pubkeys(&self) -> &ChannelPublicKeys { &self.holder_channel_pubkeys }
 
        fn channel_keys_id(&self) -> [u8; 32] { self.channel_keys_id }
@@ -1131,10 +1141,6 @@ impl EcdsaChannelSigner for InMemorySigner {
                Ok((commitment_sig, htlc_sigs))
        }
 
-       fn validate_counterparty_revocation(&self, _idx: u64, _secret: &SecretKey) -> Result<(), ()> {
-               Ok(())
-       }
-
        fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
                let funding_pubkey = PublicKey::from_secret_key(secp_ctx, &self.funding_key);
                let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
@@ -1258,7 +1264,7 @@ impl TaprootChannelSigner for InMemorySigner {
                todo!()
        }
 
-       fn finalize_holder_commitment(&self, commitment_number: u64, commitment_tx: &HolderCommitmentTransaction, counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+       fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
                todo!()
        }
 
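Note that `validate_counterparty_revocation` has moved from `EcdsaChannelSigner` to the base `ChannelSigner` trait, so ECDSA and taproot signers share it. Stateful signers enforce the same invariant as the test signer further below: only the current commitment or the immediately following one (numerically one lower, since commitment numbers count down) may be revoked. A minimal standalone sketch of that check (`RevocationTracker` is illustrative, not an LDK type):

    use std::sync::Mutex;

    struct RevocationTracker {
        last_counterparty_revoked_commitment: Mutex<u64>,
    }

    impl RevocationTracker {
        fn validate_counterparty_revocation(&self, idx: u64) -> Result<(), ()> {
            let mut last = self.last_counterparty_revoked_commitment.lock().unwrap();
            // Accept a re-validation of the current index or a step to the
            // next (numerically lower) commitment; reject anything else.
            if idx != *last && idx + 1 != *last {
                return Err(());
            }
            *last = idx;
            Ok(())
        }
    }
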
index ebfce345fcb7de22f8797e14428ac56d8eb9e1f7..230383f4f7d6496bcb1b7d40829b086ea6d39d05 100644 (file)
@@ -53,8 +53,7 @@ pub trait TaprootChannelSigner: ChannelSigner {
        /// An external signer implementation should check that the commitment has not been revoked.
        ///
        // TODO: Document the things someone using this interface should enforce before signing.
-       fn finalize_holder_commitment(&self, commitment_number: u64,
-               commitment_tx: &HolderCommitmentTransaction,
+       fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
                counterparty_partial_signature: PartialSignatureWithNonce,
                secp_ctx: &Secp256k1<secp256k1::All>
        ) -> Result<PartialSignature, ()>;
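With the `commitment_number` parameter dropped, callers no longer thread the number through separately; presumably the `HolderCommitmentTransaction` already identifies the commitment. The new call shape looks like this (a sketch, assuming `signer`, `holder_commitment_tx`, `counterparty_partial_sig`, and `secp_ctx` are in scope):

    let partial_sig = signer.finalize_holder_commitment(
        &holder_commitment_tx, counterparty_partial_sig, &secp_ctx,
    )?;
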
index dbca9b785e85dfbaf3c68253e7e47b91225c6bdc..8ca5333f63daabf21a5baed562ec8d0ae2053327 100644 (file)
@@ -18,7 +18,9 @@ use bitcoin::secp256k1::PublicKey;
 
 use core::cmp;
 use core::fmt;
+use core::ops::Deref;
 
+use crate::ln::ChannelId;
 #[cfg(c_bindings)]
 use crate::prelude::*; // Needed for String
 
@@ -95,6 +97,15 @@ impl Level {
 pub struct Record<'a> {
        /// The verbosity level of the message.
        pub level: Level,
+       /// The node id of the peer pertaining to the logged record.
+       ///
+       /// Note that in some cases a [`Self::channel_id`] may be filled in while this field is
+       /// still `None`, depending on whether the peer information is readily available in LDK
+       /// when the log is generated.
+       pub peer_id: Option<PublicKey>,
+       /// The channel id of the channel pertaining to the logged record. May be a temporary id before
+       /// the channel has been funded.
+       pub channel_id: Option<ChannelId>,
        #[cfg(not(c_bindings))]
        /// The message body.
        pub args: fmt::Arguments<'a>,
@@ -119,9 +130,14 @@ impl<'a> Record<'a> {
        ///
        /// This is not exported to bindings users as fmt can't be used in C
        #[inline]
-       pub fn new(level: Level, args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32) -> Record<'a> {
+       pub fn new(
+               level: Level, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>,
+               args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32
+       ) -> Record<'a> {
                Record {
                        level,
+                       peer_id,
+                       channel_id,
                        #[cfg(not(c_bindings))]
                        args,
                        #[cfg(c_bindings)]
@@ -135,10 +151,43 @@ impl<'a> Record<'a> {
        }
 }
 
-/// A trait encapsulating the operations required of a logger
+/// A trait encapsulating the operations required of a logger.
 pub trait Logger {
-       /// Logs the `Record`
-       fn log(&self, record: &Record);
+       /// Logs the [`Record`].
+       fn log(&self, record: Record);
+}
+
+/// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`].
+pub struct WithContext<'a, L: Deref> where L::Target: Logger {
+       /// The logger to delegate to after adding context to the record.
+       logger: &'a L,
+       /// The node id of the peer pertaining to the logged record.
+       peer_id: Option<PublicKey>,
+       /// The channel id of the channel pertaining to the logged record.
+       channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithContext<'a, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               if self.peer_id.is_some() {
+                       record.peer_id = self.peer_id;
+               }
+               if self.channel_id.is_some() {
+                       record.channel_id = self.channel_id;
+               }
+               self.logger.log(record)
+       }
+}
+
+impl<'a, L: Deref> WithContext<'a, L> where L::Target: Logger {
+       /// Wraps the given logger, providing additional context to any logged records.
+       pub fn from(logger: &'a L, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>) -> Self {
+               WithContext {
+                       logger,
+                       peer_id,
+                       channel_id,
+               }
+       }
 }
 
 /// Wrapper for logging a [`PublicKey`] in hex format.
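Call sites that know the relevant peer or channel can wrap their logger once and log normally afterwards; nested wrappers each contribute whichever fields they carry, as the second test below exercises. A sketch of typical use (`base_logger` and `counterparty_node_id` are assumed names, not from this patch):

    use lightning::util::logger::WithContext;

    let logger = WithContext::from(&base_logger, Some(counterparty_node_id), None);
    // Every record logged through `logger` now carries the peer id; a
    // channel-scoped wrapper added later can fill in the channel id too.
    log_trace!(logger, "Processing message from peer");
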
@@ -191,7 +240,9 @@ impl<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone> fmt::Display fo
 
 #[cfg(test)]
 mod tests {
-       use crate::util::logger::{Logger, Level};
+       use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1};
+       use crate::ln::ChannelId;
+       use crate::util::logger::{Logger, Level, WithContext};
        use crate::util::test_utils::TestLogger;
        use crate::sync::Arc;
 
@@ -232,6 +283,41 @@ mod tests {
                wrapper.call_macros();
        }
 
+       #[test]
+       fn test_logging_with_context() {
+               let logger = &TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let context_logger = WithContext::from(&logger, Some(pk), Some(ChannelId([0; 32])));
+               log_error!(context_logger, "This is an error");
+               log_warn!(context_logger, "This is a warning");
+               log_debug!(context_logger, "This is a debug message");
+               log_trace!(context_logger, "This is a trace message");
+               log_gossip!(context_logger, "This is a gossip message");
+               log_info!(context_logger, "This is an info message");
+               logger.assert_log_context_contains(
+                       "lightning::util::logger::tests", Some(pk), Some(ChannelId([0; 32])), 6
+               );
+       }
+
+       #[test]
+       fn test_logging_with_multiple_wrapped_context() {
+               let logger = &TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let context_logger = &WithContext::from(&logger, None, Some(ChannelId([0; 32])));
+               let full_context_logger = WithContext::from(&context_logger, Some(pk), None);
+               log_error!(full_context_logger, "This is an error");
+               log_warn!(full_context_logger, "This is a warning");
+               log_debug!(full_context_logger, "This is a debug message");
+               log_trace!(full_context_logger, "This is a trace message");
+               log_gossip!(full_context_logger, "This is a gossip message");
+               log_info!(full_context_logger, "This is an info message");
+               logger.assert_log_context_contains(
+                       "lightning::util::logger::tests", Some(pk), Some(ChannelId([0; 32])), 6
+               );
+       }
+
        #[test]
        fn test_log_ordering() {
                assert!(Level::Error > Level::Warn);
index 4836b4d6814f705cb111515a983733c4566e3a34..203c544e0096385c959d124f051bdc15295e958e 100644 (file)
@@ -159,7 +159,7 @@ macro_rules! log_spendable {
 #[macro_export]
 macro_rules! log_internal {
        ($logger: expr, $lvl:expr, $($arg:tt)+) => (
-               $logger.log(&$crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!()))
+               $logger.log($crate::util::logger::Record::new($lvl, None, None, format_args!($($arg)+), module_path!(), file!(), line!()))
        );
 }
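The macros therefore always start records with empty context, relying on `WithContext` (above) to fill it in. Constructing a `Record` with explicit context by hand is equally possible (a hypothetical direct call; `logger`, `peer_id`, and `channel_id` assumed in scope):

    use lightning::util::logger::{Level, Record};

    logger.log(Record::new(
        Level::Trace, Some(peer_id), Some(channel_id),
        format_args!("manual record"), module_path!(), file!(), line!(),
    ));
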
 
index 3fcbb50d450dd134fad8a7d0aaaab738fd22b962..a2cbf78b70053d2467cbe41fafe041eb30fa68e4 100644 (file)
@@ -146,6 +146,16 @@ impl ChannelSigner for TestChannelSigner {
                Ok(())
        }
 
+       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
+               if !*self.available.lock().unwrap() {
+                       return Err(());
+               }
+               let mut state = self.state.lock().unwrap();
+               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
+               state.last_counterparty_revoked_commitment = idx;
+               Ok(())
+       }
+
        fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
 
        fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
@@ -178,16 +188,6 @@ impl EcdsaChannelSigner for TestChannelSigner {
                Ok(self.inner.sign_counterparty_commitment(commitment_tx, inbound_htlc_preimages, outbound_htlc_preimages, secp_ctx).unwrap())
        }
 
-       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
-               if !*self.available.lock().unwrap() {
-                       return Err(());
-               }
-               let mut state = self.state.lock().unwrap();
-               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
-               state.last_counterparty_revoked_commitment = idx;
-               Ok(())
-       }
-
        fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
                if !*self.available.lock().unwrap() {
                        return Err(());
@@ -292,7 +292,7 @@ impl TaprootChannelSigner for TestChannelSigner {
                todo!()
        }
 
-       fn finalize_holder_commitment(&self, commitment_number: u64, commitment_tx: &HolderCommitmentTransaction, counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+       fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
                todo!()
        }
 
index e5c63502354fba581c02691c6f4e92cd8ab5fff9..a6c2d77451a411fac9ac1e5f2ba3fffab16ab3d2 100644 (file)
@@ -930,7 +930,8 @@ impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
 pub struct TestLogger {
        level: Level,
        pub(crate) id: String,
-       pub lines: Mutex<HashMap<(String, String), usize>>,
+       pub lines: Mutex<HashMap<(&'static str, String), usize>>,
+       pub context: Mutex<HashMap<(&'static str, Option<PublicKey>, Option<ChannelId>), usize>>,
 }
 
 impl TestLogger {
@@ -941,13 +942,14 @@ impl TestLogger {
                TestLogger {
                        level: Level::Trace,
                        id,
-                       lines: Mutex::new(HashMap::new())
+                       lines: Mutex::new(HashMap::new()),
+                       context: Mutex::new(HashMap::new()),
                }
        }
        pub fn enable(&mut self, level: Level) {
                self.level = level;
        }
-       pub fn assert_log(&self, module: String, line: String, count: usize) {
+       pub fn assert_log(&self, module: &str, line: String, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                assert_eq!(log_entries.get(&(module, line)), Some(&count));
        }
@@ -959,7 +961,7 @@ impl TestLogger {
        pub fn assert_log_contains(&self, module: &str, line: &str, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-                       m == module && l.contains(line)
+                       *m == module && l.contains(line)
                }).map(|(_, c) | { c }).sum();
                assert_eq!(l, count)
        }
@@ -972,15 +974,24 @@ impl TestLogger {
        pub fn assert_log_regex(&self, module: &str, pattern: regex::Regex, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-                       m == module && pattern.is_match(&l)
+                       *m == module && pattern.is_match(&l)
                }).map(|(_, c) | { c }).sum();
                assert_eq!(l, count)
        }
+
+       pub fn assert_log_context_contains(
+               &self, module: &str, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, count: usize
+       ) {
+               let context_entries = self.context.lock().unwrap();
+               let l = context_entries.get(&(module, peer_id, channel_id)).unwrap();
+               assert_eq!(*l, count)
+       }
 }
 
 impl Logger for TestLogger {
-       fn log(&self, record: &Record) {
-               *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
+       fn log(&self, record: Record) {
+               *self.lines.lock().unwrap().entry((record.module_path, format!("{}", record.args))).or_insert(0) += 1;
+               *self.context.lock().unwrap().entry((record.module_path, record.peer_id, record.channel_id)).or_insert(0) += 1;
                if record.level >= self.level {
                        #[cfg(all(not(ldk_bench), feature = "std"))] {
                                let pfx = format!("{} {} [{}:{}]", self.id, record.level.to_string(), record.module_path, record.line);
diff --git a/pending_changelog/route-blinding-intro-node.txt b/pending_changelog/route-blinding-intro-node.txt
new file mode 100644 (file)
index 0000000..3f31d37
--- /dev/null
@@ -0,0 +1,4 @@
+## Backwards Compat
+
+* Forwarding a blinded HTLC and subsequently downgrading to an LDK version prior to 0.0.119 may
+       result in a forwarding failure or an HTLC being failed backwards with an unblinded error.