Merge pull request #2774 from TheBlueMatt/2023-12-2551-followups
author Elias Rohrer <dev@tnull.de>
Fri, 8 Dec 2023 22:46:43 +0000 (23:46 +0100)
committer GitHub <noreply@github.com>
Fri, 8 Dec 2023 22:46:43 +0000 (23:46 +0100)
Doc and performance followups to #2551

16 files changed:
fuzz/src/onion_message.rs
lightning-background-processor/src/lib.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/package.rs
lightning/src/events/mod.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_payment.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/onion_message/functional_tests.rs
lightning/src/onion_message/messenger.rs

diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs
index de7b8b6b4c612a412902720df613f768649205d4..1051083d36be759d633b108a8078575d95d2287a 100644
@@ -79,6 +79,7 @@ impl MessageRouter for TestMessageRouter {
                Ok(OnionMessagePath {
                        intermediate_nodes: vec![],
                        destination,
+                       first_node_addresses: None,
                })
        }
 }
@@ -269,7 +270,9 @@ mod tests {
                                                "Received an onion message with path_id None and a reply_path: Custom(TestCustomMessage)"
                                                .to_string())), Some(&1));
                        assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(),
-                                               "Sending onion message: TestCustomMessage".to_string())), Some(&1));
+                                               "Constructing onion message when responding to Custom onion message with path_id None: TestCustomMessage".to_string())), Some(&1));
+                       assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(),
+                                               "Buffered onion message when responding to Custom onion message with path_id None".to_string())), Some(&1));
                }
 
                let two_unblinded_hops_om = "\
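
Note on the test change above: the messenger now logs the reply flow in two
stages, so the fuzz test asserts one "Constructing..." entry and one
"Buffered..." entry instead of the old single "Sending..." line. A minimal
sketch of the assertion pattern, assuming a test logger that counts entries
keyed by (module, message):

    use std::collections::HashMap;

    // Hypothetical helper mirroring the test's lookups; the test logger is
    // assumed to count log lines keyed by (module, message).
    fn assert_logged_once(entries: &HashMap<(String, String), usize>, module: &str, msg: &str) {
        assert_eq!(entries.get(&(module.to_string(), msg.to_string())), Some(&1));
    }
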
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 1e6d447c8cdaf8d8ec9b103ff8869da00d38ed09..449891ba07f2d09e1dd4fa155f8322591e15b3dd 100644
@@ -30,6 +30,7 @@ use lightning::events::{Event, PathFailure};
 #[cfg(feature = "std")]
 use lightning::events::{EventHandler, EventsProvider};
 use lightning::ln::channelmanager::ChannelManager;
+use lightning::ln::msgs::OnionMessageHandler;
 use lightning::ln::peer_handler::APeerManager;
 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::UtxoLookup;
@@ -104,6 +105,11 @@ const PING_TIMER: u64 = 30;
 #[cfg(test)]
 const PING_TIMER: u64 = 1;
 
+#[cfg(not(test))]
+const ONION_MESSAGE_HANDLER_TIMER: u64 = 10;
+#[cfg(test)]
+const ONION_MESSAGE_HANDLER_TIMER: u64 = 1;
+
 /// Prune the network graph of stale entries hourly.
 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
 
@@ -270,18 +276,20 @@ fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + Wri
 }
 
 macro_rules! define_run_body {
-       ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
-        $channel_manager: ident, $process_channel_manager_events: expr,
-        $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
-        $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
-        $check_slow_await: expr)
-       => { {
+       (
+               $persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
+               $channel_manager: ident, $process_channel_manager_events: expr,
+               $peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
+               $logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
+               $timer_elapsed: expr, $check_slow_await: expr
+       ) => { {
                log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
                $channel_manager.timer_tick_occurred();
                log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
                $chain_monitor.rebroadcast_pending_claims();
 
                let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
+               let mut last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
                let mut last_ping_call = $get_timer(PING_TIMER);
                let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
                let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
@@ -291,6 +299,7 @@ macro_rules! define_run_body {
                loop {
                        $process_channel_manager_events;
                        $process_chain_monitor_events;
+                       $process_onion_message_handler_events;
 
                        // Note that the PeerManager::process_events may block on ChannelManager's locks,
                        // hence it comes last here. When the ChannelManager finishes whatever it's doing,
@@ -334,6 +343,11 @@ macro_rules! define_run_body {
                                $channel_manager.timer_tick_occurred();
                                last_freshness_call = $get_timer(FRESHNESS_TIMER);
                        }
+                       if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
+                               log_trace!($logger, "Calling OnionMessageHandler's timer_tick_occurred");
+                               $peer_manager.onion_message_handler().timer_tick_occurred();
+                               last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
+                       }
                        if await_slow {
                                // On various platforms, we may be starved of CPU cycles for several reasons.
                                // E.g. on iOS, if we've been in the background, we will be entirely paused.
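
In the threaded instantiation further down, $get_timer is |_| Instant::now()
and $timer_elapsed compares elapsed seconds against the interval, so the new
arm reduces to roughly the following sketch (not the literal macro expansion):

    use std::time::Instant;

    const ONION_MESSAGE_HANDLER_TIMER: u64 = 10; // seconds, per the constant above

    // Roughly what the new block does under the std hooks: tick the handler
    // once the interval has elapsed, then reset the timer.
    fn tick_if_elapsed(last_call: &mut Instant, timer_tick: impl Fn()) {
        if last_call.elapsed().as_secs() > ONION_MESSAGE_HANDLER_TIMER {
            timer_tick(); // stands in for peer_manager.onion_message_handler().timer_tick_occurred()
            *last_call = Instant::now();
        }
    }
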
@@ -603,8 +617,7 @@ pub async fn process_events_async<
        CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
        PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
        RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
-       APM: APeerManager + Send + Sync,
-       PM: 'static + Deref<Target = APM> + Send + Sync,
+       PM: 'static + Deref + Send + Sync,
        S: 'static + Deref<Target = SC> + Send + Sync,
        SC: for<'b> WriteableScore<'b>,
        SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
@@ -627,6 +640,7 @@ where
        L::Target: 'static + Logger,
        P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
        PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
+       PM::Target: APeerManager + Send + Sync,
 {
        let mut should_break = false;
        let async_event_handler = |event| {
@@ -650,10 +664,12 @@ where
                        event_handler(event).await;
                }
        };
-       define_run_body!(persister,
-               chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
+       define_run_body!(
+               persister, chain_monitor,
+               chain_monitor.process_pending_events_async(async_event_handler).await,
                channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
-               gossip_sync, peer_manager, logger, scorer, should_break, {
+               peer_manager, process_onion_message_handler_events_async(&peer_manager, async_event_handler).await,
+               gossip_sync, logger, scorer, should_break, {
                        let fut = Selector {
                                a: channel_manager.get_event_or_persistence_needed_future(),
                                b: chain_monitor.get_update_future(),
@@ -673,7 +689,29 @@ where
                                task::Poll::Ready(exit) => { should_break = exit; true },
                                task::Poll::Pending => false,
                        }
-               }, mobile_interruptable_platform)
+               }, mobile_interruptable_platform
+       )
+}
+
+#[cfg(feature = "futures")]
+async fn process_onion_message_handler_events_async<
+       EventHandlerFuture: core::future::Future<Output = ()>,
+       EventHandler: Fn(Event) -> EventHandlerFuture,
+       PM: 'static + Deref + Send + Sync,
+>(
+       peer_manager: &PM, handler: EventHandler
+)
+where
+       PM::Target: APeerManager + Send + Sync,
+{
+       use lightning::events::EventsProvider;
+
+       let events = core::cell::RefCell::new(Vec::new());
+       peer_manager.onion_message_handler().process_pending_events(&|e| events.borrow_mut().push(e));
+
+       for event in events.into_inner() {
+               handler(event).await
+       }
 }
 
 #[cfg(feature = "std")]
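
The new helper above bridges a synchronous callback API into async code: the
onion message handler's process_pending_events takes a synchronous event
callback, which cannot .await, so events are first buffered in a RefCell and
then replayed through the async handler. A self-contained sketch of the same
shape, with a simplified event type standing in for Event:

    // Sync-to-async bridge: the sync callback only pushes into a RefCell
    // buffer (no await is possible inside a Fn), and the buffered events are
    // then driven through the async handler one at a time.
    async fn bridge<Fut: core::future::Future<Output = ()>>(
        emit_events: impl Fn(&dyn Fn(u32)), handler: impl Fn(u32) -> Fut,
    ) {
        let events = core::cell::RefCell::new(Vec::new());
        emit_events(&|e| events.borrow_mut().push(e));
        for event in events.into_inner() {
            handler(event).await
        }
    }
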
@@ -742,8 +780,7 @@ impl BackgroundProcessor {
                CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
                PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
                RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
-               APM: APeerManager + Send + Sync,
-               PM: 'static + Deref<Target = APM> + Send + Sync,
+               PM: 'static + Deref + Send + Sync,
                S: 'static + Deref<Target = SC> + Send + Sync,
                SC: for <'b> WriteableScore<'b>,
        >(
@@ -763,6 +800,7 @@ impl BackgroundProcessor {
                L::Target: 'static + Logger,
                P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
                PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
+               PM::Target: APeerManager + Send + Sync,
        {
                let stop_thread = Arc::new(AtomicBool::new(false));
                let stop_thread_clone = stop_thread.clone();
@@ -782,14 +820,18 @@ impl BackgroundProcessor {
                                }
                                event_handler.handle_event(event);
                        };
-                       define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
+                       define_run_body!(
+                               persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
                                channel_manager, channel_manager.process_pending_events(&event_handler),
-                               gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
+                               peer_manager,
+                               peer_manager.onion_message_handler().process_pending_events(&event_handler),
+                               gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
                                { Sleeper::from_two_futures(
                                        channel_manager.get_event_or_persistence_needed_future(),
                                        chain_monitor.get_update_future()
                                ).wait_timeout(Duration::from_millis(100)); },
-                               |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
+                               |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false
+                       )
                });
                Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
        }
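
Both here and in process_events_async above, the explicit APM type parameter
is replaced by a where-clause on PM::Target. A minimal sketch of the two
spellings (simplified trait, hypothetical names):

    use std::ops::Deref;

    trait APeerManager { fn onion_message_handler(&self); }

    struct PeerManager;
    impl APeerManager for PeerManager { fn onion_message_handler(&self) {} }

    // Before: fn run<APM: APeerManager + Send + Sync, PM: Deref<Target = APM>>(pm: PM)
    // After, as in this diff: the deref target is constrained in a where-clause,
    // so no extra type parameter appears in the signature.
    fn run<PM: Deref>(pm: PM) where PM::Target: APeerManager {
        pm.onion_message_handler();
    }

    // Usage is unchanged for callers, e.g. run(Arc::new(PeerManager)).
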
@@ -1362,9 +1404,11 @@ mod tests {
 
        #[test]
        fn test_timer_tick_called() {
-               // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
-               // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
-               // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
+               // Test that:
+               // - `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
+               // - `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`,
+               // - `PeerManager::timer_tick_occurred` is called every `PING_TIMER`, and
+               // - `OnionMessageHandler::timer_tick_occurred` is called every `ONION_MESSAGE_HANDLER_TIMER`.
                let (_, nodes) = create_nodes(1, "test_timer_tick_called");
                let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
@@ -1375,9 +1419,11 @@ mod tests {
                        let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
                        let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
                        let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
+                       let desired_log_4 = "Calling OnionMessageHandler's timer_tick_occurred".to_string();
                        if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
                                log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
-                               log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() {
+                               log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() &&
+                               log_entries.get(&("lightning_background_processor", desired_log_4)).is_some() {
                                break
                        }
                }
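
A sketch of the wait loop this test relies on, assuming a hypothetical
observed() helper that snapshots the messages logged so far; the shortened
test-profile timers above keep each tick of the real loop around a second:

    use std::collections::HashSet;

    // Spin until every expected trace line (including the new
    // OnionMessageHandler one) has been logged at least once.
    fn wait_for_logs(observed: impl Fn() -> HashSet<String>, expected: &[&str]) {
        while !expected.iter().all(|m| observed().contains(*m)) {}
    }
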
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 39fa3a237a62f02085e904349dc9555daee2c232..1fe3fcd9ff11a8046de0a64f2e95ae7968fdb662 100644
@@ -620,9 +620,8 @@ where C::Target: chain::Filter,
        pub fn rebroadcast_pending_claims(&self) {
                let monitors = self.monitors.read().unwrap();
                for (_, monitor_holder) in &*monitors {
-                       let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
                        monitor_holder.monitor.rebroadcast_pending_claims(
-                               &*self.broadcaster, &*self.fee_estimator, &logger
+                               &*self.broadcaster, &*self.fee_estimator, &self.logger
                        )
                }
        }
@@ -640,9 +639,8 @@ where
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
                log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
                self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
-                       let logger = WithChannelMonitor::from(&self.logger, &monitor);
                        monitor.block_connected(
-                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
+                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
                });
        }
 
@@ -650,9 +648,8 @@ where
                let monitor_states = self.monitors.read().unwrap();
                log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
                for monitor_state in monitor_states.values() {
-                       let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
                        monitor_state.monitor.block_disconnected(
-                               header, height, &*self.broadcaster, &*self.fee_estimator, &logger);
+                               header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger);
                }
        }
 }
@@ -669,9 +666,8 @@ where
        fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
                log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
                self.process_chain_data(header, None, txdata, |monitor, txdata| {
-                       let logger = WithChannelMonitor::from(&self.logger, &monitor);
                        monitor.transactions_confirmed(
-                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &logger)
+                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
                });
        }
 
@@ -679,20 +675,19 @@ where
                log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
                let monitor_states = self.monitors.read().unwrap();
                for monitor_state in monitor_states.values() {
-                       let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
-                       monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &logger);
+                       monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger);
                }
        }
 
        fn best_block_updated(&self, header: &Header, height: u32) {
                log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
                self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
-                       let logger = WithChannelMonitor::from(&self.logger, &monitor);
                        // While in practice there shouldn't be any recursive calls when given empty txdata,
                        // it's still possible if a chain::Filter implementation returns a transaction.
                        debug_assert!(txdata.is_empty());
                        monitor.best_block_updated(
-                               header, height, &*self.broadcaster, &*self.fee_estimator, &logger)
+                               header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
+                       )
                });
        }
 
@@ -758,8 +753,7 @@ where C::Target: chain::Filter,
 
        fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
                // Update the monitor that watches the channel referred to by the given outpoint.
-               let monitors_lock = self.monitors.read().unwrap();
-               let monitors = monitors_lock.deref();
+               let monitors = self.monitors.read().unwrap();
                match monitors.get(&funding_txo) {
                        None => {
                                log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");
@@ -802,6 +796,7 @@ where C::Target: chain::Filter,
                                        ChannelMonitorUpdateStatus::UnrecoverableError => {
                                                // Take the monitors lock for writing so that we poison it and any future
                                                // operations going forward fail immediately.
+                                               core::mem::drop(pending_monitor_updates);
                                                core::mem::drop(monitors);
                                                let _poison = self.monitors.write().unwrap();
                                                let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
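
The added drop of pending_monitor_updates matters for the poisoning trick
described in the comment: every outstanding guard must be released before
self.monitors.write() is taken, or this thread would deadlock against its own
read lock. A self-contained sketch of the drop-then-poison idiom:

    use std::sync::RwLock;

    // Release all guards, take the write lock, then panic while holding it:
    // the RwLock is poisoned, so every later lock attempt fails fast.
    fn fail_unrecoverably(monitors: &RwLock<Vec<u32>>) {
        let guard = monitors.read().unwrap();
        // ... unrecoverable persistence error detected ...
        drop(guard);
        let _poison = monitors.write().unwrap();
        panic!("persistence failed unrecoverably");
    }
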
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index e0f19f39d4761f415fb7370ba25802b3285325d0..ce1ef9128f91efe8327f2f874daca511e7c6e4f1 100644
@@ -1139,12 +1139,16 @@ impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> where L::Target: Logger
        }
 }
 
-impl<'a, 'b, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
-       pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &'b ChannelMonitor<S>) -> Self {
+impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
+       pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &ChannelMonitor<S>) -> Self {
+               Self::from_impl(logger, &*monitor.inner.lock().unwrap())
+       }
+
+       pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
+               let peer_id = monitor_impl.counterparty_node_id;
+               let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
                WithChannelMonitor {
-                       logger,
-                       peer_id: monitor.get_counterparty_node_id(),
-                       channel_id: Some(monitor.get_funding_txo().0.to_channel_id()),
+                       logger, peer_id, channel_id,
                }
        }
 }
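
Previously, WithChannelMonitor::from read the peer and channel ids through
ChannelMonitor's getters, which take the inner mutex, so callers that then
locked inner themselves paid for the lock more than once per operation.
from_impl derives the same context from an already-locked ChannelMonitorImpl.
A simplified sketch of the lock-once pattern (stand-in types):

    use std::sync::Mutex;

    struct Inner { counterparty_node_id: u8 } // stand-in for ChannelMonitorImpl
    struct Monitor { inner: Mutex<Inner> }
    struct WithContext<'a, L> { logger: &'a L, peer_id: u8 }

    impl Monitor {
        // Take the mutex once, derive the contextual logger from the guard,
        // and run the operation under the same lock, instead of locking
        // separately just to read ids for the logger.
        fn update<L>(&self, logger: &L) {
            let inner = self.inner.lock().unwrap();
            let _ctx = WithContext { logger, peer_id: inner.counterparty_node_id };
            // ... perform the update on `inner`, logging via `_ctx` ...
        }
    }
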
@@ -1282,9 +1286,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        )
        where L::Target: Logger
        {
-               self.inner.lock().unwrap().provide_initial_counterparty_commitment_tx(txid,
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.provide_initial_counterparty_commitment_tx(txid,
                        htlc_outputs, commitment_number, their_cur_per_commitment_point, feerate_per_kw,
-                       to_broadcaster_value_sat, to_countersignatory_value_sat, logger);
+                       to_broadcaster_value_sat, to_countersignatory_value_sat, &logger);
        }
 
        /// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction.
@@ -1300,8 +1306,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                their_per_commitment_point: PublicKey,
                logger: &L,
        ) where L::Target: Logger {
-               self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx(
-                       txid, htlc_outputs, commitment_number, their_per_commitment_point, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.provide_latest_counterparty_commitment_tx(
+                       txid, htlc_outputs, commitment_number, their_per_commitment_point, &logger)
        }
 
        #[cfg(test)]
@@ -1326,8 +1334,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().provide_payment_preimage(
-                       payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.provide_payment_preimage(
+                       payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
        }
 
        /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
@@ -1346,7 +1356,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().update_monitor(updates, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.update_monitor(updates, broadcaster, fee_estimator, &logger)
        }
 
        /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
@@ -1525,7 +1537,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
        where L::Target: Logger {
-               self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.get_latest_holder_commitment_txn(&logger)
        }
 
        /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
@@ -1534,7 +1548,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        #[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
        pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
        where L::Target: Logger {
-               self.inner.lock().unwrap().unsafe_get_latest_holder_commitment_txn(logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.unsafe_get_latest_holder_commitment_txn(&logger)
        }
 
        /// Processes transactions in a newly connected block, which may result in any of the following:
@@ -1555,15 +1571,17 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().block_connected(
-                       header, txdata, height, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.block_connected(
+                       header, txdata, height, broadcaster, fee_estimator, &logger)
        }
 
        /// Determines if the disconnected block contained any transactions of interest and updates
@@ -1574,14 +1592,16 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().block_disconnected(
-                       header, height, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.block_disconnected(
+                       header, height, broadcaster, fee_estimator, &logger)
        }
 
        /// Processes transactions confirmed in a block with the given header and height, returning new
@@ -1598,7 +1618,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -1606,8 +1626,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                L::Target: Logger,
        {
                let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-               self.inner.lock().unwrap().transactions_confirmed(
-                       header, txdata, height, broadcaster, &bounded_fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.transactions_confirmed(
+                       header, txdata, height, broadcaster, &bounded_fee_estimator, &logger)
        }
 
        /// Processes a transaction that was reorganized out of the chain.
@@ -1621,15 +1643,18 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                txid: &Txid,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
                let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-               self.inner.lock().unwrap().transaction_unconfirmed(
-                       txid, broadcaster, &bounded_fee_estimator, logger);
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.transaction_unconfirmed(
+                       txid, broadcaster, &bounded_fee_estimator, &logger
+               );
        }
 
        /// Updates the monitor with the current best chain tip, returning new outputs to watch. See
@@ -1645,7 +1670,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -1653,8 +1678,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                L::Target: Logger,
        {
                let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-               self.inner.lock().unwrap().best_block_updated(
-                       header, height, broadcaster, &bounded_fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.best_block_updated(
+                       header, height, broadcaster, &bounded_fee_estimator, &logger
+               )
        }
 
        /// Returns the set of txids that should be monitored for re-organization out of the chain.
@@ -1682,7 +1710,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// invoking this every 30 seconds, or lower if running in an environment with spotty
        /// connections, like on mobile.
        pub fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
-               &self, broadcaster: B, fee_estimator: F, logger: L,
+               &self, broadcaster: B, fee_estimator: F, logger: &L,
        )
        where
                B::Target: BroadcasterInterface,
@@ -1691,6 +1719,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        {
                let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
                let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
                let current_height = inner.best_block.height;
                inner.onchain_tx_handler.rebroadcast_pending_claims(
                        current_height, &broadcaster, &fee_estimator, &logger,
@@ -2399,13 +2428,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                Ok(())
        }
 
-       pub(crate) fn provide_initial_counterparty_commitment_tx<L: Deref>(
+       fn provide_initial_counterparty_commitment_tx<L: Deref>(
                &mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
                commitment_number: u64, their_per_commitment_point: PublicKey, feerate_per_kw: u32,
-               to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &L
-       )
-       where L::Target: Logger
-       {
+               to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &WithChannelMonitor<L>,
+       ) where L::Target: Logger {
                self.initial_counterparty_commitment_info = Some((their_per_commitment_point.clone(),
                        feerate_per_kw, to_broadcaster_value, to_countersignatory_value));
 
@@ -2418,7 +2445,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                their_per_commitment_point, logger);
        }
 
-       pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_per_commitment_point: PublicKey, logger: &L) where L::Target: Logger {
+       fn provide_latest_counterparty_commitment_tx<L: Deref>(
+               &mut self, txid: Txid,
+               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
+               commitment_number: u64, their_per_commitment_point: PublicKey, logger: &WithChannelMonitor<L>,
+       ) where L::Target: Logger {
                // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
                // so that a remote monitor doesn't learn anything unless there is a malicious close.
                // (only maybe, sadly we cant do the same for local info, as we need to be aware of
@@ -2551,7 +2582,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
        /// commitment_tx_infos which contain the payment hash have been revoked.
        fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
                &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
-               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>)
        where B::Target: BroadcasterInterface,
                    F::Target: FeeEstimator,
                    L::Target: Logger,
@@ -2628,9 +2659,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }
        }
 
-       pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
+       fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &WithChannelMonitor<L>)
                where B::Target: BroadcasterInterface,
-                                       L::Target: Logger,
+                       L::Target: Logger,
        {
                let commit_txs = self.get_latest_holder_commitment_txn(logger);
                let mut txs = vec![];
@@ -2642,7 +2673,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
        }
 
-       pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
+       fn update_monitor<B: Deref, F: Deref, L: Deref>(
+               &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor<L>
+       ) -> Result<(), ()>
        where B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
@@ -2787,15 +2820,15 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                } else { ret }
        }
 
-       pub fn get_latest_update_id(&self) -> u64 {
+       fn get_latest_update_id(&self) -> u64 {
                self.latest_update_id
        }
 
-       pub fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
+       fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
                &self.funding_info
        }
 
-       pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
+       fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
                // If we've detected a counterparty commitment tx on chain, we must include it in the set
                // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
                // its trivial to do, double-check that here.
@@ -2805,7 +2838,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                &self.outputs_to_watch
        }
 
-       pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
+       fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
                let mut ret = Vec::new();
                mem::swap(&mut ret, &mut self.pending_monitor_events);
                ret
@@ -2880,7 +2913,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                ret
        }
 
-       pub(crate) fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
+       fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
                let (their_per_commitment_point, feerate_per_kw, to_broadcaster_value,
                        to_countersignatory_value) = self.initial_counterparty_commitment_info?;
                let htlc_outputs = vec![];
@@ -2914,7 +2947,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        channel_parameters)
        }
 
-       pub(crate) fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
+       fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
                update.updates.iter().filter_map(|update| {
                        match update {
                                &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid,
@@ -2940,7 +2973,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }).collect()
        }
 
-       pub(crate) fn sign_to_local_justice_tx(
+       fn sign_to_local_justice_tx(
                &self, mut justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64
        ) -> Result<Transaction, ()> {
                let secret = self.get_secret(commitment_number).ok_or(())?;
@@ -2968,15 +3001,15 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                self.commitment_secrets.get_secret(idx)
        }
 
-       pub(crate) fn get_min_seen_secret(&self) -> u64 {
+       fn get_min_seen_secret(&self) -> u64 {
                self.commitment_secrets.get_min_seen_secret()
        }
 
-       pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 {
+       fn get_cur_counterparty_commitment_number(&self) -> u64 {
                self.current_counterparty_commitment_number
        }
 
-       pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 {
+       fn get_cur_holder_commitment_number(&self) -> u64 {
                self.current_holder_commitment_number
        }
 
@@ -3323,7 +3356,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }
        }
 
-       pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+       fn get_latest_holder_commitment_txn<L: Deref>(
+               &mut self, logger: &WithChannelMonitor<L>,
+       ) -> Vec<Transaction> where L::Target: Logger {
                log_debug!(logger, "Getting signed latest holder commitment transaction!");
                self.holder_tx_signed = true;
                let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
@@ -3362,7 +3397,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
        #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
        /// Note that this includes possibly-locktimed-in-the-future transactions!
-       fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+       fn unsafe_get_latest_holder_commitment_txn<L: Deref>(
+               &mut self, logger: &WithChannelMonitor<L>
+       ) -> Vec<Transaction> where L::Target: Logger {
                log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
                let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
                let txid = commitment_tx.txid();
@@ -3389,10 +3426,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                holder_transactions
        }
 
-       pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L) -> Vec<TransactionOutputs>
+       fn block_connected<B: Deref, F: Deref, L: Deref>(
+               &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B,
+               fee_estimator: F, logger: &WithChannelMonitor<L>,
+       ) -> Vec<TransactionOutputs>
                where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                                       L::Target: Logger,
+                       F::Target: FeeEstimator,
+                       L::Target: Logger,
        {
                let block_hash = header.block_hash();
                self.best_block = BestBlock::new(block_hash, height);
@@ -3407,7 +3447,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &WithChannelMonitor<L>,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -3418,7 +3458,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
                if height > self.best_block.height() {
                        self.best_block = BestBlock::new(block_hash, height);
-                       self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
+                       self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
                } else if block_hash != self.best_block.block_hash() {
                        self.best_block = BestBlock::new(block_hash, height);
                        self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
@@ -3434,7 +3474,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &WithChannelMonitor<L>,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -3551,9 +3591,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                break;
                                        }
                                }
-                               self.is_resolving_htlc_output(&tx, height, &block_hash, &logger);
+                               self.is_resolving_htlc_output(&tx, height, &block_hash, logger);
 
-                               self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, &logger);
+                               self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger);
                        }
                }
 
@@ -3561,7 +3601,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        self.best_block = BestBlock::new(block_hash, height);
                }
 
-               self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
+               self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, logger)
        }
 
        /// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update
@@ -3581,7 +3621,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                mut claimable_outpoints: Vec<PackageTemplate>,
                broadcaster: &B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: &L,
+               logger: &WithChannelMonitor<L>,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -3726,10 +3766,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                watch_outputs
        }
 
-       pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: L)
-               where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                     L::Target: Logger,
+       fn block_disconnected<B: Deref, F: Deref, L: Deref>(
+               &mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor<L>
+       ) where B::Target: BroadcasterInterface,
+               F::Target: FeeEstimator,
+               L::Target: Logger,
        {
                log_trace!(logger, "Block {} at height {} disconnected", header.block_hash(), height);
 
@@ -3749,7 +3790,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                txid: &Txid,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &WithChannelMonitor<L>,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
@@ -3828,7 +3869,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                false
        }
 
-       fn should_broadcast_holder_commitment_txn<L: Deref>(&self, logger: &L) -> bool where L::Target: Logger {
+       fn should_broadcast_holder_commitment_txn<L: Deref>(
+               &self, logger: &WithChannelMonitor<L>
+       ) -> bool where L::Target: Logger {
                // There's no need to broadcast our commitment transaction if we've seen one confirmed (even
                // with 1 confirmation) as it'll be rejected as duplicate/conflicting.
                if self.funding_spend_confirmed.is_some() ||
@@ -3904,7 +3947,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
        /// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder
        /// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
-       fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
+       fn is_resolving_htlc_output<L: Deref>(
+               &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
+       ) where L::Target: Logger {
                'outer_loop: for input in &tx.input {
                        let mut payment_data = None;
                        let htlc_claim = HTLCClaim::from_witness(&input.witness);
@@ -4145,7 +4190,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
        /// Checks if the confirmed transaction is paying funds back to some address we can assume to
        /// own.
        fn check_tx_and_push_spendable_outputs<L: Deref>(
-               &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L,
+               &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
        ) where L::Target: Logger {
                for spendable_output in self.get_spendable_outputs(tx) {
                        let entry = OnchainEventEntry {
@@ -4168,11 +4213,11 @@ where
        L::Target: Logger,
 {
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
-               self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+               self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &self.3);
        }
 
        fn block_disconnected(&self, header: &Header, height: u32) {
-               self.0.block_disconnected(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+               self.0.block_disconnected(header, height, &*self.1, &*self.2, &self.3);
        }
 }
 
@@ -4184,15 +4229,15 @@ where
        L::Target: Logger,
 {
        fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
-               self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+               self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &self.3);
        }
 
        fn transaction_unconfirmed(&self, txid: &Txid) {
-               self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+               self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &self.3);
        }
 
        fn best_block_updated(&self, header: &Header, height: u32) {
-               self.0.best_block_updated(header, height, &*self.1, &*self.2, &WithChannelMonitor::from(&self.3, &self.0));
+               self.0.best_block_updated(header, height, &*self.1, &*self.2, &self.3);
        }
 
        fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index 2871abbc22312d5f9e4f31aa5b9ec4fd49ddbf9d..bbed782bb57bd0a2e6da0fc4e9ca65ddadde6d5f 100644
@@ -473,14 +473,13 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
        /// invoking this every 30 seconds, or lower if running in an environment with spotty
        /// connections, like on mobile.
-       pub(crate) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Logger>(
                &mut self, current_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>,
                logger: &L,
        )
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len());
                for (claim_id, request) in self.pending_claim_requests.iter() {
@@ -528,13 +527,11 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        ///
        /// Panics if there are signing errors, because signing operations in reaction to on-chain
        /// events are not expected to fail, and if they do, we may lose funds.
-       fn generate_claim<F: Deref, L: Deref>(
+       fn generate_claim<F: Deref, L: Logger>(
                &mut self, cur_height: u32, cached_request: &PackageTemplate, force_feerate_bump: bool,
                fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
        ) -> Option<(u32, u64, OnchainClaim)>
-       where
-               F::Target: FeeEstimator,
-               L::Target: Logger,
+       where F::Target: FeeEstimator,
        {
                let request_outpoints = cached_request.outpoints();
                if request_outpoints.is_empty() {
@@ -688,13 +685,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        /// `conf_height` represents the height at which the request was generated. This
        /// does not need to equal the current blockchain tip height, which should be provided via
        /// `cur_height`, however it must never be higher than `cur_height`.
-       pub(crate) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
                &mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
                broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
                let mut preprocessed_requests = Vec::with_capacity(requests.len());
@@ -809,13 +805,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        /// `conf_height` represents the height at which the transactions in `txn_matched` were
        /// confirmed. This does not need to equal the current blockchain tip height, which should be
        /// provided via `cur_height`, however it must never be higher than `cur_height`.
-       pub(crate) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Logger>(
                &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash,
                cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
                let mut bump_candidates = HashMap::new();
@@ -977,16 +972,15 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                }
        }
 
-       pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn transaction_unconfirmed<B: Deref, F: Deref, L: Logger>(
                &mut self,
                txid: &Txid,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &L,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                let mut height = None;
                for entry in self.onchain_events_awaiting_threshold_conf.iter() {
@@ -1001,10 +995,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                }
        }
 
-       pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L)
+       pub(super) fn block_disconnected<B: Deref, F: Deref, L: Logger>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
                where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                                       L::Target: Logger,
+                       F::Target: FeeEstimator,
        {
                let mut bump_candidates = HashMap::new();
                let onchain_events_awaiting_threshold_conf =
@@ -1034,7 +1027,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                        // `height` is the height being disconnected, so our `current_height` is 1 lower.
                        let current_height = height - 1;
                        if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
-                               current_height, &request, true /* force_feerate_bump */, fee_estimator, &&*logger
+                               current_height, &request, true /* force_feerate_bump */, fee_estimator, logger
                        ) {
                                request.set_timer(new_timer);
                                request.set_feerate(new_feerate);
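
Throughout onchaintx.rs (and package.rs below), the logger bound changes from
L: Deref with L::Target: Logger to a direct L: Logger. WithChannelMonitor
implements Logger itself, so the channelmonitor.rs call sites can now pass it
by plain reference, which is what eliminates the &&*logger re-borrow removed
above. A minimal sketch of the simplified bound:

    trait Logger { fn log(&self, msg: &str); }

    struct ConsoleLogger;
    impl Logger for ConsoleLogger {
        fn log(&self, msg: &str) { println!("{msg}"); }
    }

    // After this diff the helpers take any Logger by reference...
    fn generate_claim<L: Logger>(logger: &L) {
        logger.log("bumping claim");
    }

    // ...so call sites pass the logger straight through, where the old
    // `L: Deref, L::Target: Logger` bound forced the `&&*logger` dance.
    fn caller(logger: &ConsoleLogger) { generate_claim(logger) }
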
diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs
index 7c488816048e3190b382315ee158c0d8c49cd2b8..efc32bf7d40d9adcdfaff19ef23e3dc66b363bd7 100644
@@ -908,10 +908,10 @@ impl PackageTemplate {
                }
                htlcs
        }
-       pub(crate) fn finalize_malleable_package<L: Deref, Signer: WriteableEcdsaChannelSigner>(
+       pub(crate) fn finalize_malleable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
                &self, current_height: u32, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64,
                destination_script: ScriptBuf, logger: &L
-       ) -> Option<Transaction> where L::Target: Logger {
+       ) -> Option<Transaction> {
                debug_assert!(self.is_malleable());
                let mut bumped_tx = Transaction {
                        version: 2,
@@ -932,9 +932,9 @@ impl PackageTemplate {
                log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
                Some(bumped_tx)
        }
-       pub(crate) fn finalize_untractable_package<L: Deref, Signer: WriteableEcdsaChannelSigner>(
+       pub(crate) fn finalize_untractable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
                &self, onchain_handler: &mut OnchainTxHandler<Signer>, logger: &L,
-       ) -> Option<Transaction> where L::Target: Logger {
+       ) -> Option<Transaction> {
                debug_assert!(!self.is_malleable());
                if let Some((outpoint, outp)) = self.inputs.first() {
                        if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
@@ -962,13 +962,11 @@ impl PackageTemplate {
        /// Returns value in satoshis to be included as package outgoing output amount and feerate
        /// which was used to generate the value. Will not return less than `dust_limit_sats` for the
        /// value.
-       pub(crate) fn compute_package_output<F: Deref, L: Deref>(
+       pub(crate) fn compute_package_output<F: Deref, L: Logger>(
                &self, predicted_weight: u64, dust_limit_sats: u64, force_feerate_bump: bool,
                fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
        ) -> Option<(u64, u64)>
-       where
-               F::Target: FeeEstimator,
-               L::Target: Logger,
+       where F::Target: FeeEstimator,
        {
                debug_assert!(self.malleability == PackageMalleability::Malleable, "The package output is fixed for non-malleable packages");
                let input_amounts = self.package_amount();
@@ -1111,9 +1109,8 @@ impl Readable for PackageTemplate {
 ///
 /// [`OnChainSweep`]: crate::chain::chaininterface::ConfirmationTarget::OnChainSweep
 /// [`FEERATE_FLOOR_SATS_PER_KW`]: crate::chain::chaininterface::MIN_RELAY_FEE_SAT_PER_1000_WEIGHT
-fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predicted_weight: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
+fn compute_fee_from_spent_amounts<F: Deref, L: Logger>(input_amounts: u64, predicted_weight: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
        where F::Target: FeeEstimator,
-             L::Target: Logger,
 {
        let sweep_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep);
        let fee_rate = cmp::min(sweep_feerate, compute_feerate_sat_per_1000_weight(input_amounts / 2, predicted_weight));
@@ -1135,13 +1132,12 @@ fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predic
 /// feerate, or just use the previous feerate otherwise. If a feerate bump did happen, we also
 /// verify that those bumping heuristics respect BIP125 rules 3) and 4) and if required adjust the
 /// new fee to meet the RBF policy requirement.
-fn feerate_bump<F: Deref, L: Deref>(
+fn feerate_bump<F: Deref, L: Logger>(
        predicted_weight: u64, input_amounts: u64, previous_feerate: u64, force_feerate_bump: bool,
        fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
 ) -> Option<(u64, u64)>
 where
        F::Target: FeeEstimator,
-       L::Target: Logger,
 {
        // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
        let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
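
For intuition on the `cmp::min` cap in `compute_fee_from_spent_amounts` above: the estimator's sweep feerate is clamped so a claim never burns more than roughly half of the claimed value in fees. A toy calculation with made-up constants (the real helper also enforces the feerate floor):

    // All constants here are illustrative, not LDK defaults.
    fn feerate_sat_per_1000_weight(fee_sat: u64, weight: u64) -> u64 {
        fee_sat * 1000 / weight
    }

    fn main() {
        let input_amounts: u64 = 100_000; // sats being claimed
        let predicted_weight: u64 = 800;  // predicted claim-tx weight
        let sweep_feerate: u64 = 250_000; // estimator output, sat per 1000 weight

        // Mirrors: cmp::min(sweep_feerate, feerate(input_amounts / 2, weight))
        let cap = feerate_sat_per_1000_weight(input_amounts / 2, predicted_weight);
        let fee_rate = std::cmp::min(sweep_feerate, cap);
        let fee = fee_rate * predicted_weight / 1000;
        assert!(fee <= input_amounts / 2); // at most ~half the value goes to fees
        println!("feerate {} sat/kW -> fee {} sat", fee_rate, fee);
    }
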
index 4e04a3634e10d3f9152ae6d074bb19365bf989a4..76e5f25c0e5f6eef87b2ff48102dd19acc239231 100644 (file)
@@ -530,6 +530,25 @@ pub enum Event {
                /// serialized prior to LDK version 0.0.117.
                sender_intended_total_msat: Option<u64>,
        },
+       /// Indicates that a peer connection with a node is needed in order to send an [`OnionMessage`].
+       ///
+       /// Typically, this happens when a [`MessageRouter`] is unable to find a complete path to a
+       /// [`Destination`]. Once a connection is established, any messages buffered by an
+       /// [`OnionMessageHandler`] may be sent.
+       ///
+       /// This event will not be generated for onion message forwards, only for sends including
+       /// replies. Handlers should connect to the node; otherwise, any buffered messages may be lost.
+       ///
+       /// [`OnionMessage`]: msgs::OnionMessage
+       /// [`MessageRouter`]: crate::onion_message::MessageRouter
+       /// [`Destination`]: crate::onion_message::Destination
+       /// [`OnionMessageHandler`]: crate::ln::msgs::OnionMessageHandler
+       ConnectionNeeded {
+               /// The node id for the node needing a connection.
+               node_id: PublicKey,
+               /// Sockets for connecting to the node.
+               addresses: Vec<msgs::SocketAddress>,
+       },
        /// Indicates a request for an invoice failed to yield a response in a reasonable amount of time
        /// or was explicitly abandoned by [`ChannelManager::abandon_payment`]. This may be for an
        /// [`InvoiceRequest`] sent for an [`Offer`] or for a [`Refund`] that hasn't been redeemed.
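
Since `ConnectionNeeded` is intentionally never persisted (see the `Writeable` change below), applications must act on it as it is generated. A sketch of an event handler; `try_connect` is a hypothetical stand-in for the application's networking code:

    use bitcoin::secp256k1::PublicKey;
    use lightning::events::Event;
    use lightning::ln::msgs::SocketAddress;

    // Hypothetical application networking; returns true once connected.
    fn try_connect(_node_id: PublicKey, _addr: &SocketAddress) -> bool { true }

    fn handle_event(event: Event) {
        if let Event::ConnectionNeeded { node_id, addresses } = event {
            // Connect so the OnionMessageHandler can flush its buffered messages.
            for addr in &addresses {
                if try_connect(node_id, addr) { break; }
            }
        }
    }
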
@@ -1190,6 +1209,10 @@ impl Writeable for Event {
                                        (0, payment_id, required),
                                })
                        },
+                       &Event::ConnectionNeeded { .. } => {
+                               35u8.write(writer)?;
+                               // Never write ConnectionNeeded events as buffered onion messages aren't serialized.
+                       },
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
@@ -1200,8 +1223,7 @@ impl Writeable for Event {
 impl MaybeReadable for Event {
        fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
                match Readable::read(reader)? {
-                       // Note that we do not write a length-prefixed TLV for FundingGenerationReady events,
-                       // unlike all other events, thus we return immediately here.
+                       // Note that we do not write a length-prefixed TLV for FundingGenerationReady events.
                        0u8 => Ok(None),
                        1u8 => {
                                let f = || {
@@ -1588,6 +1610,8 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
+                       // Note that we do not write a length-prefixed TLV for ConnectionNeeded events.
+                       35u8 => Ok(None),
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
                        // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
                        // reads.
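
The choice of 35 as the type byte is deliberate: it is odd, and readers on 0.0.100+ skip odd-numbered event types they don't understand rather than failing to deserialize. A rough sketch of the dispatch shape only (the real `MaybeReadable` impl reads TLV bodies for known types and skips the body of unknown odd ones):

    fn maybe_read_event(type_byte: u8) -> Result<Option<()>, ()> {
        match type_byte {
            0 => Ok(None),               // FundingGenerationReady: never resurfaced
            35 => Ok(None),              // ConnectionNeeded: rebuilt from message buffers
            t if t % 2 == 1 => Ok(None), // unknown odd types are safe to ignore
            _ => Err(()),                // unknown even types are required reading
        }
    }
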
index f01454bd5bd54e9c3d04be127cca0a860e95e9f0..582f67878e60d8e59d4a36b567e12a8fcded0798 100644 (file)
@@ -35,7 +35,7 @@ use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
 use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
@@ -261,76 +261,293 @@ enum HTLCUpdateAwaitingACK {
        },
 }
 
-/// There are a few "states" and then a number of flags which can be applied:
-/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
-/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
-/// move on to `ChannelReady`.
-/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
-/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
-/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
+macro_rules! define_state_flags {
+       ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
+               #[doc = $flag_type_doc]
+               #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
+               struct $flag_type(u32);
+
+               impl $flag_type {
+                       $(
+                               #[doc = $flag_doc]
+                               const $flag: $flag_type = $flag_type($value);
+                       )*
+
+                       /// All flags that apply to the specified [`ChannelState`] variant.
+                       #[allow(unused)]
+                       const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
+
+                       #[allow(unused)]
+                       fn new() -> Self { Self(0) }
+
+                       #[allow(unused)]
+                       fn from_u32(flags: u32) -> Result<Self, ()> {
+                               if flags & !Self::ALL.0 != 0 {
+                                       Err(())
+                               } else {
+                                       Ok($flag_type(flags))
+                               }
+                       }
+
+                       #[allow(unused)]
+                       fn is_empty(&self) -> bool { self.0 == 0 }
+
+                       #[allow(unused)]
+                       fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+               }
+
+               impl core::ops::Not for $flag_type {
+                       type Output = Self;
+                       fn not(self) -> Self::Output { Self(!self.0) }
+               }
+               impl core::ops::BitOr for $flag_type {
+                       type Output = Self;
+                       fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
+               }
+               impl core::ops::BitOrAssign for $flag_type {
+                       fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
+               }
+               impl core::ops::BitAnd for $flag_type {
+                       type Output = Self;
+                       fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
+               }
+               impl core::ops::BitAndAssign for $flag_type {
+                       fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
+               }
+       };
+       ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
+               define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
+       };
+       ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
+               define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+               impl core::ops::BitOr<FundedStateFlags> for $flag_type {
+                       type Output = Self;
+                       fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
+               }
+               impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
+                       fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
+               }
+               impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
+                       type Output = Self;
+                       fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
+               }
+               impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
+                       fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
+               }
+               impl PartialEq<FundedStateFlags> for $flag_type {
+                       fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
+               }
+               impl From<FundedStateFlags> for $flag_type {
+                       fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
+               }
+       };
+}
+
+/// We declare all the states/flags here together to help determine which bits are still available
+/// to choose.
+mod state_flags {
+       pub const OUR_INIT_SENT: u32 = 1 << 0;
+       pub const THEIR_INIT_SENT: u32 = 1 << 1;
+       pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
+       pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
+       pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
+       pub const OUR_CHANNEL_READY: u32 = 1 << 5;
+       pub const CHANNEL_READY: u32 = 1 << 6;
+       pub const PEER_DISCONNECTED: u32 = 1 << 7;
+       pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
+       pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
+       pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
+       pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
+       pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
+       pub const WAITING_FOR_BATCH: u32 = 1 << 13;
+}
+
+define_state_flags!(
+       "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
+       FundedStateFlags, [
+               ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
+                       until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
+               ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
+                       somewhere and we should pause sending any outbound messages until they've managed to \
+                       complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
+               ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
+                       any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
+                       message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
+               ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
+                       the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
+       ]
+);
+
+define_state_flags!(
+       "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
+       NegotiatingFundingFlags, [
+               ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
+                       OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
+               ("Indicates we have received their `open_channel`/`accept_channel` message.",
+                       THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
+       ]
+);
+
+define_state_flags!(
+       "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
+       FUNDED_STATE, AwaitingChannelReadyFlags, [
+               ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+                       `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+                       THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
+               ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+                       `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+                       OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
+               ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
+                       is being held until all channels in the batch have received `funding_signed` and have \
+                       their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
+       ]
+);
+
+define_state_flags!(
+       "Flags that only apply to [`ChannelState::ChannelReady`].",
+       FUNDED_STATE, ChannelReadyFlags, [
+               ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
+                       `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
+                       messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
+                       implicit ACK, so instead we have to hold them away temporarily to be sent later.",
+                       AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
+       ]
+);
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
 enum ChannelState {
-       /// Implies we have (or are prepared to) send our open_channel/accept_channel message
-       OurInitSent = 1 << 0,
-       /// Implies we have received their `open_channel`/`accept_channel` message
-       TheirInitSent = 1 << 1,
-       /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
-       /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
-       /// upon receipt of `funding_created`, so simply skip this state.
-       FundingCreated = 4,
-       /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
-       /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
-       /// and our counterparty consider the funding transaction confirmed.
-       FundingSent = 8,
-       /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
-       /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
-       TheirChannelReady = 1 << 4,
-       /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
-       /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
-       OurChannelReady = 1 << 5,
-       ChannelReady = 64,
-       /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
-       /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
-       /// dance.
-       PeerDisconnected = 1 << 7,
-       /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
-       /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
-       /// sending any outbound messages until they've managed to finish.
-       MonitorUpdateInProgress = 1 << 8,
-       /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
-       /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
-       /// messages as then we will be unable to determine which HTLCs they included in their
-       /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
-       /// later.
-       /// Flag is set on `ChannelReady`.
-       AwaitingRemoteRevoke = 1 << 9,
-       /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
-       /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
-       /// to respond with our own shutdown message when possible.
-       RemoteShutdownSent = 1 << 10,
-       /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
-       /// point, we may not add any new HTLCs to the channel.
-       LocalShutdownSent = 1 << 11,
-       /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
-       /// to drop us, but we store this anyway.
-       ShutdownComplete = 4096,
-       /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
-       /// broadcasting of the funding transaction is being held until all channels in the batch
-       /// have received funding_signed and have their monitors persisted.
-       WaitingForBatch = 1 << 13,
+       /// We are negotiating the parameters required for the channel prior to funding it.
+       NegotiatingFunding(NegotiatingFundingFlags),
+       /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
+       /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
+       /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
+       FundingNegotiated,
+       /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
+       /// funding transaction to confirm.
+       AwaitingChannelReady(AwaitingChannelReadyFlags),
+       /// Both we and our counterparty consider the funding transaction confirmed and the channel is
+       /// now operational.
+       ChannelReady(ChannelReadyFlags),
+       /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
+       /// is about to drop us, but we store this anyway.
+       ShutdownComplete,
+}
+
+macro_rules! impl_state_flag {
+       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
+               #[allow(unused)]
+               fn $get(&self) -> bool {
+                       match self {
+                               $(
+                                       ChannelState::$state(flags) => flags.is_set($state_flag.into()),
+                               )*
+                               _ => false,
+                       }
+               }
+               #[allow(unused)]
+               fn $set(&mut self) {
+                       match self {
+                               $(
+                                       ChannelState::$state(flags) => *flags |= $state_flag,
+                               )*
+                               _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
+                       }
+               }
+               #[allow(unused)]
+               fn $clear(&mut self) {
+                       match self {
+                               $(
+                                       ChannelState::$state(flags) => *flags &= !($state_flag),
+                               )*
+                               _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
+                       }
+               }
+       };
+       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
+               impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
+       };
+       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
+               impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
+       };
+}
+
+impl ChannelState {
+       fn from_u32(state: u32) -> Result<Self, ()> {
+               match state {
+                       state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
+                       state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
+                       val => {
+                               if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
+                                       AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
+                                               .map(|flags| ChannelState::AwaitingChannelReady(flags))
+                               } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
+                                       ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
+                                               .map(|flags| ChannelState::ChannelReady(flags))
+                               } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
+                                       Ok(ChannelState::NegotiatingFunding(flags))
+                               } else {
+                                       Err(())
+                               }
+                       },
+               }
+       }
+
+       fn to_u32(&self) -> u32 {
+               match self {
+                       ChannelState::NegotiatingFunding(flags) => flags.0,
+                       ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
+                       ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
+                       ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
+                       ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
+               }
+       }
+
+       fn is_pre_funded_state(&self) -> bool {
+               matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
+       }
+
+       fn is_both_sides_shutdown(&self) -> bool {
+               self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
+       }
+
+       fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
+               match self {
+                       ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+                       ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+                       _ => FundedStateFlags::new(),
+               }
+       }
+
+       fn should_force_holding_cell(&self) -> bool {
+               match self {
+                       ChannelState::ChannelReady(flags) =>
+                               flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
+                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
+                                       flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+                       _ => {
+                               debug_assert!(false, "The holding cell is only valid within ChannelReady");
+                               false
+                       },
+               }
+       }
+
+       impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
+               FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
+       impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
+               FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
+       impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
+               FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
+       impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
+               FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
+       impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
+               AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
+       impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
+               AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
+       impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
+               AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
+       impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
+               ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
 }
-const BOTH_SIDES_SHUTDOWN_MASK: u32 =
-       ChannelState::LocalShutdownSent as u32 |
-       ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 =
-       BOTH_SIDES_SHUTDOWN_MASK |
-       ChannelState::PeerDisconnected as u32 |
-       ChannelState::MonitorUpdateInProgress as u32;
-const STATE_FLAGS: u32 =
-       MULTI_STATE_FLAGS |
-       ChannelState::TheirChannelReady as u32 |
-       ChannelState::OurChannelReady as u32 |
-       ChannelState::AwaitingRemoteRevoke as u32 |
-       ChannelState::WaitingForBatch as u32;
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
@@ -576,7 +793,6 @@ pub(super) struct MonitorRestoreUpdates {
 pub(super) struct SignerResumeUpdates {
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub funding_signed: Option<msgs::FundingSigned>,
-       pub funding_created: Option<msgs::FundingCreated>,
        pub channel_ready: Option<msgs::ChannelReady>,
 }
 
@@ -743,7 +959,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
        /// Will be `None` for channels created prior to 0.0.115.
        temporary_channel_id: Option<ChannelId>,
-       channel_state: u32,
+       channel_state: ChannelState,
 
        // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
        // our peer. However, we want to make sure they received it, or else rebroadcast it when we
@@ -1040,49 +1256,55 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns true if we've ever received a message from the remote end for this Channel
        pub fn have_received_message(&self) -> bool {
-               self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
+               self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
        }
 
        /// Returns true if this channel is fully established and not known to be closing.
        /// Allowed in any state (including after shutdown)
        pub fn is_usable(&self) -> bool {
-               let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
-               (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
+               matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
+                       !self.channel_state.is_local_shutdown_sent() &&
+                       !self.channel_state.is_remote_shutdown_sent() &&
+                       !self.monitor_pending_channel_ready
        }
 
        /// shutdown state returns the state of the channel in its various stages of shutdown
        pub fn shutdown_state(&self) -> ChannelShutdownState {
-               if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
-                       return ChannelShutdownState::ShutdownComplete;
-               }
-               if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 &&  self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
-                       return ChannelShutdownState::ShutdownInitiated;
-               }
-               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
-                       return ChannelShutdownState::ResolvingHTLCs;
-               }
-               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
-                       return ChannelShutdownState::NegotiatingClosingFee;
+               match self.channel_state {
+                       ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
+                               if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
+                                       ChannelShutdownState::ShutdownInitiated
+                               } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
+                                       ChannelShutdownState::ResolvingHTLCs
+                               } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
+                                       ChannelShutdownState::NegotiatingClosingFee
+                               } else {
+                                       ChannelShutdownState::NotShuttingDown
+                               },
+                       ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
+                       _ => ChannelShutdownState::NotShuttingDown,
                }
-               return ChannelShutdownState::NotShuttingDown;
        }
 
        fn closing_negotiation_ready(&self) -> bool {
+               let is_ready_to_close = match self.channel_state {
+                       ChannelState::AwaitingChannelReady(flags) =>
+                               flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+                       ChannelState::ChannelReady(flags) =>
+                               flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+                       _ => false,
+               };
                self.pending_inbound_htlcs.is_empty() &&
-               self.pending_outbound_htlcs.is_empty() &&
-               self.pending_update_fee.is_none() &&
-               self.channel_state &
-               (BOTH_SIDES_SHUTDOWN_MASK |
-                       ChannelState::AwaitingRemoteRevoke as u32 |
-                       ChannelState::PeerDisconnected as u32 |
-                       ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+                       self.pending_outbound_htlcs.is_empty() &&
+                       self.pending_update_fee.is_none() &&
+                       is_ready_to_close
        }
 
        /// Returns true if this channel is currently available for use. This is a superset of
        /// is_usable() and considers things like the channel being temporarily disabled.
        /// Allowed in any state (including after shutdown)
        pub fn is_live(&self) -> bool {
-               self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
+               self.is_usable() && !self.channel_state.is_peer_disconnected()
        }
 
        // Public utilities:
@@ -1334,8 +1556,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// Returns true if funding_signed was sent/received and the
        /// funding transaction has been broadcast if necessary.
        pub fn is_funding_broadcast(&self) -> bool {
-               self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
-                       self.channel_state & ChannelState::WaitingForBatch as u32 == 0
+               !self.channel_state.is_pre_funded_state() &&
+                       !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
        }
 
        /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
@@ -2091,11 +2313,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
                where F: Fn() -> Option<O> {
-               if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
-                  self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
-                       f()
-               } else {
-                       None
+               match self.channel_state {
+                       ChannelState::FundingNegotiated => f(),
+                       ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
+                               f()
+                       } else {
+                               None
+                       },
+                       _ => None,
                }
        }
 
@@ -2134,7 +2359,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                // called during initialization prior to the chain_monitor in the encompassing ChannelManager
                // being fully configured in some cases. Thus, it's likely any monitor events we generate will
                // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
-               assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
+               assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
 
                // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
                // return them to fail the payment.
@@ -2149,14 +2374,18 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        }
                }
                let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
-                       // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
+                       // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
                        // returning a channel monitor update here would imply a channel monitor update before
                        // we even registered the channel monitor to begin with, which is invalid.
                        // Thus, if we aren't actually at a point where we could conceivably broadcast the
                        // funding transaction, don't return a funding txo (which prevents providing the
                        // monitor update to the user, even if we return one).
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-                       if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+                       let generate_monitor_update = match self.channel_state {
+                               ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
+                               _ => false,
+                       };
+                       if generate_monitor_update {
                                self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
                                Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
@@ -2166,7 +2395,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                } else { None };
                let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
 
-               self.channel_state = ChannelState::ShutdownComplete as u32;
+               self.channel_state = ChannelState::ShutdownComplete;
                self.update_time_counter += 1;
                ShutdownResult {
                        monitor_update,
@@ -2177,38 +2406,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                }
        }
 
-       /// Only allowed after [`Self::channel_transaction_parameters`] is set.
-       fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
-               let counterparty_keys = self.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-               let signature = match &self.holder_signer {
-                       // TODO (taproot|arik): move match into calling method for Taproot
-                       ChannelSignerType::Ecdsa(ecdsa) => {
-                               ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
-                                       .map(|(sig, _)| sig).ok()?
-                       },
-                       // TODO (taproot|arik)
-                       #[cfg(taproot)]
-                       _ => todo!()
-               };
-
-               if self.signer_pending_funding {
-                       log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
-                       self.signer_pending_funding = false;
-               }
-
-               Some(msgs::FundingCreated {
-                       temporary_channel_id: self.temporary_channel_id.unwrap(),
-                       funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
-                       funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
-                       signature,
-                       #[cfg(taproot)]
-                       partial_signature_with_nonce: None,
-                       #[cfg(taproot)]
-                       next_local_nonce: None,
-               })
-       }
-
        /// Only allowed after [`Self::channel_transaction_parameters`] is set.
        fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
                let counterparty_keys = self.build_remote_transaction_keys();
@@ -2437,7 +2634,7 @@ impl<SP: Deref> Channel<SP> where
        where L::Target: Logger {
                // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
                // (see equivalent if condition there).
-               assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+               assert!(self.context.channel_state.should_force_holding_cell());
                let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
                let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
                self.context.latest_monitor_update_id = mon_update_id;
@@ -2451,10 +2648,9 @@ impl<SP: Deref> Channel<SP> where
                // caller thought we could have something claimed (because we wouldn't have accepted an
                // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
                // either.
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
                // ChannelManager may generate duplicate claims/fails due to HTLC update events from
                // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
@@ -2507,7 +2703,7 @@ impl<SP: Deref> Channel<SP> where
                        }],
                };
 
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+               if self.context.channel_state.should_force_holding_cell() {
                        // Note that this condition is the same as the assertion in
                        // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
                        // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
@@ -2535,7 +2731,7 @@ impl<SP: Deref> Channel<SP> where
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
+                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
@@ -2630,10 +2826,9 @@ impl<SP: Deref> Channel<SP> where
        /// [`ChannelError::Ignore`].
        fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
        -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        panic!("Was asked to fail an HTLC when channel was not in an operational state");
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
                // ChannelManager may generate duplicate claims/fails due to HTLC update events from
                // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
@@ -2667,7 +2862,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(None);
                }
 
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+               if self.context.channel_state.should_force_holding_cell() {
                        debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
                        force_holding_cell = true;
                }
@@ -2714,107 +2909,13 @@ impl<SP: Deref> Channel<SP> where
        }
 
        // Message handlers:
-
-       /// Handles a funding_signed message from the remote end.
-       /// If this call is successful, broadcast the funding transaction (and not before!)
-       pub fn funding_signed<L: Deref>(
-               &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
-       where
-               L::Target: Logger
-       {
-               if !self.context.is_outbound() {
-                       return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
-               }
-               if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
-                       return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
-               }
-               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
-                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
-                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
-               }
-
-               let funding_script = self.context.get_funding_redeemscript();
-
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
-               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
-
-               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
-
-               let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
-               {
-                       let trusted_tx = initial_commitment_tx.trust();
-                       let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
-                       let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
-                       // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
-                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
-                               return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
-                       }
-               }
-
-               let holder_commitment_tx = HolderCommitmentTransaction::new(
-                       initial_commitment_tx,
-                       msg.signature,
-                       Vec::new(),
-                       &self.context.get_holder_pubkeys().funding_pubkey,
-                       self.context.counterparty_funding_pubkey()
-               );
-
-               self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
-                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
-
-               let funding_redeemscript = self.context.get_funding_redeemscript();
-               let funding_txo = self.context.get_funding_txo().unwrap();
-               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
-               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
-               let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
-               let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
-               monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
-               let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
-                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
-                                                         &self.context.destination_script, (funding_txo, funding_txo_script),
-                                                         &self.context.channel_transaction_parameters,
-                                                         funding_redeemscript.clone(), self.context.channel_value_satoshis,
-                                                         obscure_factor,
-                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
-               let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
-               channel_monitor.provide_initial_counterparty_commitment_tx(
-                       counterparty_initial_bitcoin_tx.txid, Vec::new(),
-                       self.context.cur_counterparty_commitment_transaction_number,
-                       self.context.counterparty_cur_commitment_point.unwrap(),
-                       counterparty_initial_commitment_tx.feerate_per_kw(),
-                       counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
-
-               assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
-               if self.context.is_batch_funding() {
-                       self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
-               } else {
-                       self.context.channel_state = ChannelState::FundingSent as u32;
-               }
-               self.context.cur_holder_commitment_transaction_number -= 1;
-               self.context.cur_counterparty_commitment_transaction_number -= 1;
-
-               log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
-
-               let need_channel_ready = self.check_get_channel_ready(0).is_some();
-               self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
-               Ok(channel_monitor)
-       }
-
        /// Updates the state of the channel to indicate that all channels in the batch have received
        /// funding_signed and persisted their monitors.
        /// The funding transaction is consequently allowed to be broadcast, and the channel can be
        /// treated as a non-batch channel going forward.
        pub fn set_batch_ready(&mut self) {
                self.context.is_batch_funding = None;
-               self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
+               self.context.channel_state.clear_waiting_for_batch();
        }
 
        /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
@@ -2828,7 +2929,7 @@ impl<SP: Deref> Channel<SP> where
                NS::Target: NodeSigner,
                L::Target: Logger
        {
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        self.context.workaround_lnd_bug_4006 = Some(msg.clone());
                        return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
                }
@@ -2842,24 +2943,31 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
-               let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-
                // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
                // batch, but we can receive channel_ready messages.
-               debug_assert!(
-                       non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
-                       non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
-               );
-               if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
-                       self.context.channel_state |= ChannelState::TheirChannelReady as u32;
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
-                       self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
-                       self.context.update_time_counter += 1;
-               } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
-                       // If we reconnected before sending our `channel_ready` they may still resend theirs:
-                       (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
-                                             (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
-               {
+               let mut check_reconnection = false;
+               match &self.context.channel_state {
+                       ChannelState::AwaitingChannelReady(flags) => {
+                               let flags = *flags & !FundedStateFlags::ALL;
+                               debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+                               if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
+                                       // If we reconnected before sending our `channel_ready` they may still resend theirs.
+                                       check_reconnection = true;
+                               } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+                                       self.context.channel_state.set_their_channel_ready();
+                               } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
+                                       self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
+                                       self.context.update_time_counter += 1;
+                               } else {
+                                       // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
+                                       debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+                               }
+                       }
+                       // If we reconnected before sending our `channel_ready` they may still resend theirs.
+                       ChannelState::ChannelReady(_) => check_reconnection = true,
+                       _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+               }
+               if check_reconnection {
                        // They probably disconnected/reconnected and re-sent the channel_ready, which is
                        // required, or they're sending a fresh SCID alias.
                        let expected_point =
@@ -2883,8 +2991,6 @@ impl<SP: Deref> Channel<SP> where
                                return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
                        }
                        return Ok(None);
-               } else {
-                       return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
                }
 
                self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
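
Throughout the rewritten `channel_ready` handler, `flags & !FundedStateFlags::ALL` strips the flags shared by every funded state (peer-disconnected, monitor-update-in-progress, and the shutdown bits) so that only the `AwaitingChannelReady`-specific bits take part in the state-machine comparison. A toy illustration with assumed bit positions:

	// Bit positions are illustrative assumptions, not channel.rs's real layout.
	const PEER_DISCONNECTED: u32 = 1 << 0; // shared by all funded states
	const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 1; // shared by all funded states
	const FUNDED_STATE_FLAGS_ALL: u32 = PEER_DISCONNECTED | MONITOR_UPDATE_IN_PROGRESS;

	const THEIR_CHANNEL_READY: u32 = 1 << 4; // specific to AwaitingChannelReady

	fn main() {
		let flags = PEER_DISCONNECTED | THEIR_CHANNEL_READY;
		// Masking away the shared flags leaves only the state-specific bits, so a
		// concurrent disconnect or in-flight monitor update cannot perturb the match.
		assert_eq!(flags & !FUNDED_STATE_FLAGS_ALL, THEIR_CHANNEL_READY);
	}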
@@ -2902,17 +3008,18 @@ impl<SP: Deref> Channel<SP> where
        where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
                FE::Target: FeeEstimator, L::Target: Logger,
        {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+               }
                // We can't accept HTLCs sent after we've sent a shutdown.
-               let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
-               if local_sent_shutdown {
+               if self.context.channel_state.is_local_shutdown_sent() {
                        pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
                }
                // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
-               let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
-               if remote_sent_shutdown {
+               if self.context.channel_state.is_remote_shutdown_sent() {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
                }
                if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
@@ -3047,7 +3154,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
                }
 
-               if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+               if self.context.channel_state.is_local_shutdown_sent() {
                        if let PendingHTLCStatus::Forward(_) = pending_forward_status {
                                panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
                        }
@@ -3097,10 +3204,10 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
 
@@ -3108,10 +3215,10 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
                }
 
@@ -3120,10 +3227,10 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
                }
 
@@ -3134,13 +3241,13 @@ impl<SP: Deref> Channel<SP> where
        pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
                where L::Target: Logger
        {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+               if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
                        return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
                }
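
`is_both_sides_shutdown` replaces the old `channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK` test. A hedged sketch of an equivalent helper over per-side shutdown flags (the flag storage and bit values are assumed):

	struct FundedStateFlags(u32);

	impl FundedStateFlags {
		const LOCAL_SHUTDOWN_SENT: u32 = 1 << 2; // bit assumed
		const REMOTE_SHUTDOWN_SENT: u32 = 1 << 3; // bit assumed

		fn is_local_shutdown_sent(&self) -> bool { self.0 & Self::LOCAL_SHUTDOWN_SENT != 0 }
		fn is_remote_shutdown_sent(&self) -> bool { self.0 & Self::REMOTE_SHUTDOWN_SENT != 0 }

		// Both sides have sent shutdown, i.e. the old BOTH_SIDES_SHUTDOWN_MASK is fully set.
		fn is_both_sides_shutdown(&self) -> bool {
			self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
		}
	}

	fn main() {
		let flags = FundedStateFlags(FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT);
		assert!(flags.is_both_sides_shutdown());
	}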
 
@@ -3315,11 +3422,11 @@ impl<SP: Deref> Channel<SP> where
                // build_commitment_no_status_check() next which will reset this to RAAFirst.
                self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
 
-               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+               if self.context.channel_state.is_monitor_update_in_progress() {
                        // In case we initially failed monitor updating without requiring a response, we need
                        // to make sure the RAA gets sent first.
                        self.context.monitor_pending_revoke_and_ack = true;
-                       if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+                       if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
                                // If we were going to send a commitment_signed after the RAA, go ahead and do all
                                // the corresponding HTLC status updates so that
                                // get_last_commitment_update_for_send includes the right HTLCs.
@@ -3335,7 +3442,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(self.push_ret_blockable_mon_update(monitor_update));
                }
 
-               let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+               let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
                        // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
                        // we'll send one right away when we get the revoke_and_ack, in
                        // free_holding_cell_htlcs().
@@ -3361,8 +3468,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
-                  (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
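
The new `should_force_holding_cell` folds the removed three-flag mask into one predicate. A sketch assuming it simply ORs the per-flag accessors seen elsewhere in this diff:

	// Hypothetical stand-in; the real method lives on ChannelState in channel.rs.
	struct State {
		awaiting_remote_revoke: bool,
		peer_disconnected: bool,
		monitor_update_in_progress: bool,
	}

	impl State {
		// Mirrors the removed AwaitingRemoteRevoke | PeerDisconnected |
		// MonitorUpdateInProgress mask: any of the three forces new updates
		// into the holding cell instead of sending them immediately.
		fn should_force_holding_cell(&self) -> bool {
			self.awaiting_remote_revoke || self.peer_disconnected || self.monitor_update_in_progress
		}
	}

	fn main() {
		let s = State { awaiting_remote_revoke: false, peer_disconnected: true, monitor_update_in_progress: false };
		assert!(s.should_force_holding_cell());
	}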
@@ -3374,7 +3480,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+               assert!(!self.context.channel_state.is_monitor_update_in_progress());
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
                                if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
@@ -3495,13 +3601,13 @@ impl<SP: Deref> Channel<SP> where
        ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
        where F::Target: FeeEstimator, L::Target: Logger,
        {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+               if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
                        return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
                }
 
@@ -3513,7 +3619,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
-               if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+               if !self.context.channel_state.is_awaiting_remote_revoke() {
                        // Our counterparty seems to have burned their coins to us (by revoking a state when we
                        // haven't given them a new commitment transaction to broadcast). We should probably
                        // take advantage of this by updating our channel monitor, sending them an error, and
@@ -3557,7 +3663,7 @@ impl<SP: Deref> Channel<SP> where
                // (note that we may still fail to generate the new commitment_signed message, but that's
                // OK, we step the channel here and *then* if the new generation fails we can fail the
                // channel based on that, but stepping stuff here should be safe either way.)
-               self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+               self.context.channel_state.clear_awaiting_remote_revoke();
                self.context.sent_message_awaiting_response = None;
                self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
                self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
@@ -3699,7 +3805,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
-               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+               if self.context.channel_state.is_monitor_update_in_progress() {
                        // We can't actually generate a new commitment transaction (incl by freeing holding
                        // cells) while we can't update the monitor, so we just return what we have.
                        if require_commitment {
@@ -3821,7 +3927,7 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+               if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
                        force_holding_cell = true;
                }
 
@@ -3846,12 +3952,12 @@ impl<SP: Deref> Channel<SP> where
        /// completed.
        /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
        pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
-                       return Err(());
+               assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+               if self.context.channel_state.is_pre_funded_state() {
+                       return Err(())
                }
 
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+               if self.context.channel_state.is_peer_disconnected() {
                        // While the below code should be idempotent, it's simpler to just return early, as
                        // redundant disconnect events can fire, though they should be rare.
                        return Ok(());
@@ -3913,7 +4019,7 @@ impl<SP: Deref> Channel<SP> where
 
                self.context.sent_message_awaiting_response = None;
 
-               self.context.channel_state |= ChannelState::PeerDisconnected as u32;
+               self.context.channel_state.set_peer_disconnected();
                log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
                Ok(())
        }
@@ -3940,7 +4046,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.monitor_pending_forwards.append(&mut pending_forwards);
                self.context.monitor_pending_failures.append(&mut pending_fails);
                self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
-               self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+               self.context.channel_state.set_monitor_update_in_progress();
        }
 
        /// Indicates that the latest ChannelMonitor update has been committed by the client
@@ -3954,19 +4060,22 @@ impl<SP: Deref> Channel<SP> where
                L::Target: Logger,
                NS::Target: NodeSigner
        {
-               assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
-               self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+               assert!(self.context.channel_state.is_monitor_update_in_progress());
+               self.context.channel_state.clear_monitor_update_in_progress();
 
-               // If we're past (or at) the FundingSent stage on an outbound channel, try to
+               // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
                // first received the funding_signed.
                let mut funding_broadcastable =
-                       if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
+                       if self.context.is_outbound() &&
+                               matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+                               matches!(self.context.channel_state, ChannelState::ChannelReady(_))
+                       {
                                self.context.funding_transaction.take()
                        } else { None };
                // That said, if the funding transaction is already confirmed (ie we're active with a
                // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
-               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
                        funding_broadcastable = None;
                }
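
The rebroadcast condition above leans on `matches!` with an `if` guard to express "funded, but not still waiting on the rest of the batch". A compact demonstration of the pattern with simplified types:

	#[derive(Clone, Copy)]
	struct Flags(u32);
	impl Flags {
		const WAITING_FOR_BATCH: u32 = 1 << 3; // bit assumed
		fn is_set(&self, bit: u32) -> bool { self.0 & bit != 0 }
	}

	enum ChannelState { AwaitingChannelReady(Flags), ChannelReady(Flags) }

	fn may_rebroadcast_funding(state: &ChannelState) -> bool {
		// True while AwaitingChannelReady only if WAITING_FOR_BATCH is clear,
		// and unconditionally once the channel is ChannelReady.
		matches!(state, ChannelState::AwaitingChannelReady(f) if !f.is_set(Flags::WAITING_FOR_BATCH))
			|| matches!(state, ChannelState::ChannelReady(_))
	}

	fn main() {
		assert!(!may_rebroadcast_funding(&ChannelState::AwaitingChannelReady(Flags(Flags::WAITING_FOR_BATCH))));
		assert!(may_rebroadcast_funding(&ChannelState::ChannelReady(Flags(0))));
	}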
 
@@ -3997,7 +4106,7 @@ impl<SP: Deref> Channel<SP> where
                let mut finalized_claimed_htlcs = Vec::new();
                mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
 
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+               if self.context.channel_state.is_peer_disconnected() {
                        self.context.monitor_pending_revoke_and_ack = false;
                        self.context.monitor_pending_commitment_signed = false;
                        return MonitorRestoreUpdates {
@@ -4034,7 +4143,7 @@ impl<SP: Deref> Channel<SP> where
                if self.context.is_outbound() {
                        return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
                }
                Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
@@ -4073,20 +4182,15 @@ impl<SP: Deref> Channel<SP> where
                let channel_ready = if funding_signed.is_some() {
                        self.check_get_channel_ready(0)
                } else { None };
-               let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
-                       self.context.get_funding_created_msg(logger)
-               } else { None };
 
-               log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
+               log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
                        if commitment_update.is_some() { "a" } else { "no" },
                        if funding_signed.is_some() { "a" } else { "no" },
-                       if funding_created.is_some() { "a" } else { "no" },
                        if channel_ready.is_some() { "a" } else { "no" });
 
                SignerResumeUpdates {
                        commitment_update,
                        funding_signed,
-                       funding_created,
                        channel_ready,
                }
        }
@@ -4185,7 +4289,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
        pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
-               if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+               if self.context.channel_state.is_local_shutdown_sent() {
                        assert!(self.context.shutdown_scriptpubkey.is_some());
                        Some(msgs::Shutdown {
                                channel_id: self.context.channel_id,
@@ -4209,7 +4313,7 @@ impl<SP: Deref> Channel<SP> where
                L::Target: Logger,
                NS::Target: NodeSigner
        {
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+               if !self.context.channel_state.is_peer_disconnected() {
                        // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
                        // almost certainly indicates we are going to end up out-of-sync in some way, so we
                        // just close here instead of trying to recover.
@@ -4259,17 +4363,17 @@ impl<SP: Deref> Channel<SP> where
 
                // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
                // remaining cases either succeed or ErrorMessage-fail).
-               self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
+               self.context.channel_state.clear_peer_disconnected();
                self.context.sent_message_awaiting_response = None;
 
                let shutdown_msg = self.get_outbound_shutdown();
 
                let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
 
-               if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
+               if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
                        // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
-                       if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
-                                       self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+                       if !self.context.channel_state.is_our_channel_ready() ||
+                                       self.context.channel_state.is_monitor_update_in_progress() {
                                if msg.next_remote_commitment_number != 0 {
                                        return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
                                }
@@ -4301,7 +4405,7 @@ impl<SP: Deref> Channel<SP> where
                        // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
                        None
                } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
-                       if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+                       if self.context.channel_state.is_monitor_update_in_progress() {
                                self.context.monitor_pending_revoke_and_ack = true;
                                None
                        } else {
@@ -4320,7 +4424,7 @@ impl<SP: Deref> Channel<SP> where
                // revoke_and_ack, not on sending commitment_signed, so we add one if we have
                // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
                // the corresponding revoke_and_ack back yet.
-               let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
+               let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
                if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
                        self.mark_awaiting_response();
                }
@@ -4356,7 +4460,7 @@ impl<SP: Deref> Channel<SP> where
                                log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
                        }
 
-                       if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+                       if self.context.channel_state.is_monitor_update_in_progress() {
                                self.context.monitor_pending_commitment_signed = true;
                                Ok(ReestablishResponses {
                                        channel_ready, shutdown_msg, announcement_sigs,
@@ -4543,10 +4647,10 @@ impl<SP: Deref> Channel<SP> where
                &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
        ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        {
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+               if self.context.channel_state.is_pre_funded_state() {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
@@ -4557,7 +4661,7 @@ impl<SP: Deref> Channel<SP> where
                                return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
                        }
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+               assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
 
                if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
                        return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
@@ -4574,7 +4678,7 @@ impl<SP: Deref> Channel<SP> where
                // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
                // immediately after the commitment dance, but we can send a Shutdown because we won't send
                // any further commitment updates after we set LocalShutdownSent.
-               let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
+               let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
 
                let update_shutdown_script = match self.context.shutdown_scriptpubkey {
                        Some(_) => false,
@@ -4594,7 +4698,7 @@ impl<SP: Deref> Channel<SP> where
 
                // From here on out, we may not fail!
 
-               self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+               self.context.channel_state.set_remote_shutdown_sent();
                self.context.update_time_counter += 1;
 
                let monitor_update = if update_shutdown_script {
@@ -4630,7 +4734,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                });
 
-               self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+               self.context.channel_state.set_local_shutdown_sent();
                self.context.update_time_counter += 1;
 
                Ok((shutdown, monitor_update, dropped_outbound_htlcs))
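
Taken together, this handler sets `REMOTE_SHUTDOWN_SENT` when the peer's `shutdown` arrives and `LOCAL_SHUTDOWN_SENT` once our reply is queued; only when both are set does `closing_signed` processing become legal (see the `is_both_sides_shutdown` check further below). A toy walk-through of that progression, with booleans standing in for the real flag bits:

	#[derive(Default)]
	struct ShutdownFlags { local_sent: bool, remote_sent: bool }

	impl ShutdownFlags {
		fn set_remote_shutdown_sent(&mut self) { self.remote_sent = true; }
		fn set_local_shutdown_sent(&mut self) { self.local_sent = true; }
		fn is_both_sides_shutdown(&self) -> bool { self.local_sent && self.remote_sent }
	}

	fn main() {
		let mut flags = ShutdownFlags::default();
		flags.set_remote_shutdown_sent(); // peer's Shutdown handled
		flags.set_local_shutdown_sent();  // our Shutdown queued in response
		// Only now may closing_signed be processed.
		assert!(flags.is_both_sides_shutdown());
	}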
@@ -4664,10 +4768,10 @@ impl<SP: Deref> Channel<SP> where
                -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
                where F::Target: FeeEstimator
        {
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
+               if !self.context.channel_state.is_both_sides_shutdown() {
                        return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
                }
                if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
@@ -4681,7 +4785,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
                }
 
-               if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
+               if self.context.channel_state.is_monitor_update_in_progress() {
                        self.context.pending_counterparty_closing_signed = Some(msg.clone());
                        return Ok((None, None, None));
                }
@@ -4721,7 +4825,7 @@ impl<SP: Deref> Channel<SP> where
                                        counterparty_node_id: self.context.counterparty_node_id,
                                };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
-                               self.context.channel_state = ChannelState::ShutdownComplete as u32;
+                               self.context.channel_state = ChannelState::ShutdownComplete;
                                self.context.update_time_counter += 1;
                                return Ok((None, Some(tx), Some(shutdown_result)));
                        }
@@ -4750,7 +4854,7 @@ impl<SP: Deref> Channel<SP> where
                                                                channel_id: self.context.channel_id,
                                                                counterparty_node_id: self.context.counterparty_node_id,
                                                        };
-                                                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
+                                                       self.context.channel_state = ChannelState::ShutdownComplete;
                                                        self.context.update_time_counter += 1;
                                                        let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
                                                        (Some(tx), Some(shutdown_result))
@@ -4875,7 +4979,7 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
-               self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
+               self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
        }
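
The expression above reports the counterparty's "current" commitment number as one above the stored countdown value unless we are still awaiting their `revoke_and_ack`. A tiny worked example of the arithmetic:

	fn cur_counterparty_commitment_number(stored: u64, awaiting_remote_revoke: bool) -> u64 {
		stored + 1 - if awaiting_remote_revoke { 1 } else { 0 }
	}

	fn main() {
		// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER.
		assert_eq!(cur_counterparty_commitment_number(42, false), 43);
		// While awaiting their revoke_and_ack, the stored (newer, lower) number is used as-is.
		assert_eq!(cur_counterparty_commitment_number(42, true), 42);
	}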
 
        pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
@@ -4915,7 +5019,7 @@ impl<SP: Deref> Channel<SP> where
        /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
        /// Allowed in any state (including after shutdown)
        pub fn is_awaiting_monitor_update(&self) -> bool {
-               (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
+               self.context.channel_state.is_monitor_update_in_progress()
        }
 
        /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
@@ -4957,17 +5061,18 @@ impl<SP: Deref> Channel<SP> where
        /// advanced state.
        pub fn is_awaiting_initial_mon_persist(&self) -> bool {
                if !self.is_awaiting_monitor_update() { return false; }
-               if self.context.channel_state &
-                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
-                               == ChannelState::FundingSent as u32 {
+               if matches!(
+                       self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
+                       if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
+               ) {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
-                       // FundingSent set, though our peer could have sent their channel_ready.
+                       // AwaitingChannelReady set, though our peer could have sent their channel_ready.
                        debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
                        return true;
                }
                if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
                        self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
-                       // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
+                       // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
                        // waiting for the initial monitor persistence. Thus, we check if our commitment
                        // transaction numbers have both been iterated only exactly once (for the
                        // funding_signed), and we're awaiting monitor update.
@@ -4989,27 +5094,25 @@ impl<SP: Deref> Channel<SP> where
 
        /// Returns true if our channel_ready has been sent
        pub fn is_our_channel_ready(&self) -> bool {
-               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
+               matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
+                       matches!(self.context.channel_state, ChannelState::ChannelReady(_))
        }
 
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
        pub fn received_shutdown(&self) -> bool {
-               (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
+               self.context.channel_state.is_remote_shutdown_sent()
        }
 
        /// Returns true if we either initiated or agreed to shut down the channel.
        pub fn sent_shutdown(&self) -> bool {
-               (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+               self.context.channel_state.is_local_shutdown_sent()
        }
 
        /// Returns true if this channel is fully shut down. True here implies that no further actions
        /// may/will be taken on this channel, and thus this object should be freed. Any future changes
        /// will be handled appropriately by the chain monitor.
        pub fn is_shutdown(&self) -> bool {
-               if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
-                       assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
-                       true
-               } else { false }
+               matches!(self.context.channel_state, ChannelState::ShutdownComplete)
        }
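
With `ShutdownComplete` now a dataless variant, the old runtime assertion (that no other state bits accompany it) is unrepresentable by construction, so the method reduces to a single `matches!`. A minimal sketch with a toy variant set:

	enum ChannelState { ShutdownComplete, Other }

	fn is_shutdown(state: &ChannelState) -> bool {
		// No flag bits can coexist with ShutdownComplete: the variant carries no data.
		matches!(state, ChannelState::ShutdownComplete)
	}

	fn main() {
		assert!(is_shutdown(&ChannelState::ShutdownComplete));
		assert!(!is_shutdown(&ChannelState::Other));
	}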
 
        pub fn channel_update_status(&self) -> ChannelUpdateStatus {
@@ -5046,35 +5149,36 @@ impl<SP: Deref> Channel<SP> where
 
                // Note that we don't include AwaitingChannelReadyFlags::WAITING_FOR_BATCH as we don't want to send
                // channel_ready until the entire batch is ready.
-               let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-               let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
-                       self.context.channel_state |= ChannelState::OurChannelReady as u32;
+               let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
+                       self.context.channel_state.set_our_channel_ready();
                        true
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
-                       self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
+                       self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
                        self.context.update_time_counter += 1;
                        true
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
-                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
+                       if self.context.funding_tx_confirmation_height != 0 &&
+                               self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
+                       {
                                // We should never see a funding transaction on-chain until we've received
                                // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
                                // an inbound channel - before that we have no known funding TXID). The fuzzer,
                                // however, may do this and we shouldn't treat it as a bug.
                                #[cfg(not(fuzzing))]
-                               panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
+                               panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
                                        Do NOT broadcast a funding transaction manually - let LDK do it for you!",
-                                       self.context.channel_state);
+                                       self.context.channel_state.to_u32());
                        }
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                };
 
                if need_commitment_update {
-                       if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
-                               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+                       if !self.context.channel_state.is_monitor_update_in_progress() {
+                               if !self.context.channel_state.is_peer_disconnected() {
                                        let next_per_commitment_point =
                                                self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
                                        return Some(msgs::ChannelReady {
@@ -5228,9 +5332,8 @@ impl<SP: Deref> Channel<SP> where
                        return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                }
 
-               let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-               if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
-                  (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+                       self.context.channel_state.is_our_channel_ready() {
                        let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
                        if self.context.funding_tx_confirmation_height == 0 {
                                // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
@@ -5257,8 +5360,8 @@ impl<SP: Deref> Channel<SP> where
                                height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
                        log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
                        // If funding_tx_confirmed_in is unset, the channel must not be active
-                       assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
-                       assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
+                       assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
+                       assert!(!self.context.channel_state.is_our_channel_ready());
                        return Err(ClosureReason::FundingTimedOut);
                }
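
Both the assertion above and the earlier `channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())` test compare `ChannelState` values directly, which presumes an ordering over the state progression (the `to_u32()` in the panic message hints at the mapping). One way such an ordering could plausibly be defined, shown on a simplified fieldless enum with the variant list abbreviated:

	// Sketch only: discriminant values and the derive strategy are assumptions.
	#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
	enum Stage { NegotiatingFunding, AwaitingChannelReady, ChannelReady, ShutdownComplete }

	fn main() {
		// Derived Ord on a fieldless enum follows declaration order, so states
		// compare by how far the channel has progressed.
		assert!(Stage::AwaitingChannelReady < Stage::ChannelReady);
		assert!(Stage::ChannelReady < Stage::ShutdownComplete);
	}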
 
@@ -5356,7 +5459,7 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
-               if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+               if self.context.channel_state.is_peer_disconnected() {
                        log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
                        return None;
                }
@@ -5494,7 +5597,7 @@ impl<SP: Deref> Channel<SP> where
        /// May panic if called on a channel that wasn't immediately-previously
        /// self.remove_uncommitted_htlcs_and_mark_paused()'d
        pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
-               assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
+               assert!(self.context.channel_state.is_peer_disconnected());
                assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
                // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
                // current to_remote balances. However, it no longer has any use, and thus is now simply
@@ -5530,7 +5633,7 @@ impl<SP: Deref> Channel<SP> where
                        // (which is one further, as they always revoke previous commitment transaction, not
                        // the one we send) so we have to decrement by 1. Note that if
                        // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
-                       // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
+                       // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
                        // overflow here.
                        next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
                        your_last_per_commitment_secret: remote_last_secret,
@@ -5592,7 +5695,10 @@ impl<SP: Deref> Channel<SP> where
        ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+                       self.context.channel_state.is_local_shutdown_sent() ||
+                       self.context.channel_state.is_remote_shutdown_sent()
+               {
                        return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
                }
                let channel_total_msat = self.context.channel_value_satoshis * 1000;
@@ -5615,7 +5721,7 @@ impl<SP: Deref> Channel<SP> where
                                available_balances.next_outbound_htlc_limit_msat)));
                }
 
-               if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
+               if self.context.channel_state.is_peer_disconnected() {
                        // Note that this should never really happen: being !is_live() on receipt of an
                        // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
                        // the user to send directly into a !is_live() channel. However, if we
@@ -5625,7 +5731,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
 
-               let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+               let need_holding_cell = self.context.channel_state.should_force_holding_cell();
                log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
                        payment_hash, amount_msat,
                        if force_holding_cell { "into holding cell" }
@@ -5732,7 +5838,7 @@ impl<SP: Deref> Channel<SP> where
                                to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
                        }]
                };
-               self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+               self.context.channel_state.set_awaiting_remote_revoke();
                monitor_update
        }
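
Each flag introduced by this refactor travels with a `set_*`/`clear_*`/`is_*` triple (`set_awaiting_remote_revoke` here, `clear_awaiting_remote_revoke` and `is_awaiting_remote_revoke` in earlier hunks). A sketch of the boilerplate such a triple encapsulates, which one would presumably generate with a macro (bit value assumed):

	const AWAITING_REMOTE_REVOKE: u32 = 1 << 5; // bit assumed

	struct ChannelReadyFlags(u32);

	impl ChannelReadyFlags {
		fn set_awaiting_remote_revoke(&mut self) { self.0 |= AWAITING_REMOTE_REVOKE; }
		fn clear_awaiting_remote_revoke(&mut self) { self.0 &= !AWAITING_REMOTE_REVOKE; }
		fn is_awaiting_remote_revoke(&self) -> bool { self.0 & AWAITING_REMOTE_REVOKE != 0 }
	}

	fn main() {
		let mut flags = ChannelReadyFlags(0);
		flags.set_awaiting_remote_revoke();
		assert!(flags.is_awaiting_remote_revoke());
		flags.clear_awaiting_remote_revoke();
		assert!(!flags.is_awaiting_remote_revoke());
	}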
 
@@ -5866,44 +5972,32 @@ impl<SP: Deref> Channel<SP> where
 
        /// Begins the shutdown process, getting a message for the remote peer and returning all
        /// holding cell HTLCs for payment failure.
-       ///
-       /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
-       /// [`ChannelMonitorUpdate`] will be returned).
        pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
                target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
+       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        {
                for htlc in self.context.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
                                return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
                        }
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
-                       if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
-                               return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
-                       }
-                       else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
-                               return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
-                       }
+               if self.context.channel_state.is_local_shutdown_sent() {
+                       return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
+               }
+               else if self.context.channel_state.is_remote_shutdown_sent() {
+                       return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
                }
                if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
                        return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
+               assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+               if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
                        return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
                }
 
-               // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
-               // script is set, we just force-close and call it a day.
-               let mut chan_closed = false;
-               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
-                       chan_closed = true;
-               }
-
                let update_shutdown_script = match self.context.shutdown_scriptpubkey {
                        Some(_) => false,
-                       None if !chan_closed => {
+                       None => {
                                // use override shutdown script if provided
                                let shutdown_scriptpubkey = match override_shutdown_script {
                                        Some(script) => script,
@@ -5921,25 +6015,11 @@ impl<SP: Deref> Channel<SP> where
                                self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
                                true
                        },
-                       None => false,
                };
 
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
-               let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
-                       let shutdown_result = ShutdownResult {
-                               monitor_update: None,
-                               dropped_outbound_htlcs: Vec::new(),
-                               unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
-                               channel_id: self.context.channel_id,
-                               counterparty_node_id: self.context.counterparty_node_id,
-                       };
-                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
-                       Some(shutdown_result)
-               } else {
-                       self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
-                       None
-               };
+               self.context.channel_state.set_local_shutdown_sent();
                self.context.update_time_counter += 1;
 
                let monitor_update = if update_shutdown_script {
@@ -5975,7 +6055,7 @@ impl<SP: Deref> Channel<SP> where
                debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
                        "we can't both complete shutdown and return a monitor update");
 
-               Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
+               Ok((shutdown, monitor_update, dropped_outbound_htlcs))
        }
 
        pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
@@ -6086,7 +6166,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                                channel_id: temporary_channel_id,
                                temporary_channel_id: Some(temporary_channel_id),
-                               channel_state: ChannelState::OurInitSent as u32,
+                               channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
                                announcement_sigs_state: AnnouncementSigsState::NotSent,
                                secp_ctx,
                                channel_value_satoshis,
@@ -6203,6 +6283,38 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                })
        }
 
+       /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
+       fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+               let signature = match &self.context.holder_signer {
+                       // TODO (taproot|arik): move match into calling method for Taproot
+                       ChannelSignerType::Ecdsa(ecdsa) => {
+                               ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
+                                       .map(|(sig, _)| sig).ok()?
+                       },
+                       // TODO (taproot|arik)
+                       #[cfg(taproot)]
+                       _ => todo!()
+               };
+
+               if self.context.signer_pending_funding {
+                       log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
+                       self.context.signer_pending_funding = false;
+               }
+
+               Some(msgs::FundingCreated {
+                       temporary_channel_id: self.context.temporary_channel_id.unwrap(),
+                       funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
+                       funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
+                       signature,
+                       #[cfg(taproot)]
+                       partial_signature_with_nonce: None,
+                       #[cfg(taproot)]
+                       next_local_nonce: None,
+               })
+       }
+
        /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
        /// a funding_created message for the remote peer.
        /// Panics if called at some time other than immediately after initial handshake, if called twice,
@@ -6210,12 +6322,15 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
-       -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
+       pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
+       -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
                }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+               if !matches!(
+                       self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                       if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+               ) {
                        panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
@@ -6229,7 +6344,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // Now that we're past error-generating stuff, update our local state:
 
-               self.context.channel_state = ChannelState::FundingCreated as u32;
+               self.context.channel_state = ChannelState::FundingNegotiated;
                self.context.channel_id = funding_txo.to_channel_id();
 
                // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
@@ -6243,7 +6358,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.funding_transaction = Some(funding_transaction);
                self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
 
-               let funding_created = self.context.get_funding_created_msg(logger);
+               let funding_created = self.get_funding_created_msg(logger);
                if funding_created.is_none() {
                        if !self.context.signer_pending_funding {
                                log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
@@ -6251,11 +6366,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        }
                }
 
-               let channel = Channel {
-                       context: self.context,
-               };
-
-               Ok((channel, funding_created))
+               Ok(funding_created)
        }
 
        fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
@@ -6290,7 +6401,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        where
                F::Target: FeeEstimator
        {
-               if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+               if !self.context.is_outbound() ||
+                       !matches!(
+                               self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                               if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+                       )
+               {
+                       return Err(());
+               }
                if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
                        // We've exhausted our options
                        return Err(());
@@ -6321,7 +6439,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                if !self.context.is_outbound() {
                        panic!("Tried to open a channel for an inbound channel?");
                }
-               if self.context.channel_state != ChannelState::OurInitSent as u32 {
+               if self.context.have_received_message() {
                        panic!("Cannot generate an open_channel after we've moved forward");
                }
 
@@ -6367,7 +6485,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                if !self.context.is_outbound() {
                        return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
                }
-               if self.context.channel_state != ChannelState::OurInitSent as u32 {
+               if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
                        return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
                }
                if msg.dust_limit_satoshis > 21000000 * 100000000 {
@@ -6484,11 +6602,119 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
                self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
 
-               self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+               self.context.channel_state = ChannelState::NegotiatingFunding(
+                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+               );
                self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
 
                Ok(())
        }
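
Several hunks above replace raw u32 bitmask tests with `matches!` guards over flag-carrying enum variants. A minimal standalone sketch of that idiom, using invented toy types rather than LDK's actual ChannelState and NegotiatingFundingFlags:

    // Toy stand-ins for the flag-carrying state enum used in the hunks above.
    #[derive(Clone, Copy, PartialEq)]
    struct FundingFlags(u32);

    impl FundingFlags {
        const OUR_INIT_SENT: FundingFlags = FundingFlags(1);
        const THEIR_INIT_SENT: FundingFlags = FundingFlags(1 << 1);
    }

    impl std::ops::BitOr for FundingFlags {
        type Output = FundingFlags;
        fn bitor(self, rhs: FundingFlags) -> FundingFlags { FundingFlags(self.0 | rhs.0) }
    }

    #[derive(Clone, Copy, PartialEq)]
    enum ToyChannelState {
        NegotiatingFunding(FundingFlags),
        FundingNegotiated,
    }

    // The matches!-with-guard idiom from the hunks above: true only while we
    // are still negotiating AND both init messages have been exchanged.
    fn handshake_complete(state: ToyChannelState) -> bool {
        matches!(
            state, ToyChannelState::NegotiatingFunding(flags)
            if flags == (FundingFlags::OUR_INIT_SENT | FundingFlags::THEIR_INIT_SENT)
        )
    }

The single-flag checks above (e.g. `flags == NegotiatingFundingFlags::OUR_INIT_SENT`) are the same shape with one constant on the right-hand side.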
+
+       /// Handles a funding_signed message from the remote end.
+       /// If this call is successful, broadcast the funding transaction (and not before!)
+       pub fn funding_signed<L: Deref>(
+               mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+       ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
+       where
+               L::Target: Logger
+       {
+               if !self.context.is_outbound() {
+                       return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
+               }
+               if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
+                       return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
+               }
+               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+               }
+
+               let funding_script = self.context.get_funding_redeemscript();
+
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+
+               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+               let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+               {
+                       let trusted_tx = initial_commitment_tx.trust();
+                       let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+                       let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+                       // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+                               return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
+                       }
+               }
+
+               let holder_commitment_tx = HolderCommitmentTransaction::new(
+                       initial_commitment_tx,
+                       msg.signature,
+                       Vec::new(),
+                       &self.context.get_holder_pubkeys().funding_pubkey,
+                       self.context.counterparty_funding_pubkey()
+               );
+
+               let validated =
+                       self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
+               if validated.is_err() {
+                       return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+               }
+
+               let funding_redeemscript = self.context.get_funding_redeemscript();
+               let funding_txo = self.context.get_funding_txo().unwrap();
+               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+               let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+               let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+               monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+               let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
+                                                         &self.context.destination_script, (funding_txo, funding_txo_script),
+                                                         &self.context.channel_transaction_parameters,
+                                                         funding_redeemscript.clone(), self.context.channel_value_satoshis,
+                                                         obscure_factor,
+                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
+               channel_monitor.provide_initial_counterparty_commitment_tx(
+                       counterparty_initial_bitcoin_tx.txid, Vec::new(),
+                       self.context.cur_counterparty_commitment_transaction_number,
+                       self.context.counterparty_cur_commitment_point.unwrap(),
+                       counterparty_initial_commitment_tx.feerate_per_kw(),
+                       counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+
+               assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
+               if self.context.is_batch_funding() {
+                       self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
+               } else {
+                       self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
+               }
+               self.context.cur_holder_commitment_transaction_number -= 1;
+               self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+               log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
+
+               let mut channel = Channel { context: self.context };
+
+               let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+               channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+               Ok((channel, channel_monitor))
+       }
+
+       /// Indicates that the signer may have some signatures for us, so we should retry if we're
+       /// blocked.
+       #[allow(unused)]
+       pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+               if self.context.signer_pending_funding && self.context.is_outbound() {
+                       log_trace!(logger, "Signer unblocked a funding_created");
+                       self.get_funding_created_msg(logger)
+               } else { None }
+       }
 }
 
 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
@@ -6721,7 +6947,9 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                                temporary_channel_id: Some(msg.temporary_channel_id),
                                channel_id: msg.temporary_channel_id,
-                               channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
+                               channel_state: ChannelState::NegotiatingFunding(
+                                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+                               ),
                                announcement_sigs_state: AnnouncementSigsState::NotSent,
                                secp_ctx,
 
@@ -6851,7 +7079,10 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                if self.context.is_outbound() {
                        panic!("Tried to send accept_channel for an outbound channel?");
                }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
+               if !matches!(
+                       self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                       if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+               ) {
                        panic!("Tried to send accept_channel after channel had moved forward");
                }
                if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
@@ -6931,7 +7162,10 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                if self.context.is_outbound() {
                        return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
                }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+               if !matches!(
+                       self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                       if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+               ) {
                        // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
                        // remember the channel, so it's safe to just send an error_message here and drop the
                        // channel.
@@ -6976,7 +7210,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // Now that we're past error-generating stuff, update our local state:
 
-               self.context.channel_state = ChannelState::FundingSent as u32;
+               self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
                self.context.channel_id = funding_txo.to_channel_id();
                self.context.cur_counterparty_commitment_transaction_number -= 1;
                self.context.cur_holder_commitment_transaction_number -= 1;
@@ -6996,13 +7230,12 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
                                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);
-               let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number + 1,
                        self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
                        counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
+                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 
                log_info!(logger, "{} funding_signed for peer for channel {}",
                        if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
@@ -7095,7 +7328,13 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                writer.write_all(&[0; 8])?;
 
                self.context.channel_id.write(writer)?;
-               (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
+               {
+                       let mut channel_state = self.context.channel_state;
+                       if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
+                               channel_state.set_peer_disconnected();
+                       }
+                       channel_state.to_u32().write(writer)?;
+               }
                self.context.channel_value_satoshis.write(writer)?;
 
                self.context.latest_monitor_update_id.write(writer)?;
@@ -7415,7 +7654,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                }
 
                let channel_id = Readable::read(reader)?;
-               let channel_state = Readable::read(reader)?;
+               let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
                let channel_value_satoshis = Readable::read(reader)?;
 
                let latest_monitor_update_id = Readable::read(reader)?;
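
The read/write hunks above persist the state enum as a u32 and map unknown values to DecodeError::InvalidValue on read. A minimal roundtrip sketch; the discriminant layout below is invented for illustration and is not LDK's actual encoding:

    // Invented encoding: variant discriminant in the high bits, flags in the
    // low byte. Unknown discriminants fail the decode, mirroring the
    // map_err(|_| DecodeError::InvalidValue) above.
    #[derive(Debug, Clone, Copy, PartialEq)]
    enum ToyState {
        Negotiating(u8),
        AwaitingReady(u8),
    }

    impl ToyState {
        fn to_u32(self) -> u32 {
            match self {
                ToyState::Negotiating(flags) => (1u32 << 8) | flags as u32,
                ToyState::AwaitingReady(flags) => (2u32 << 8) | flags as u32,
            }
        }

        fn from_u32(value: u32) -> Result<ToyState, ()> {
            let flags = (value & 0xff) as u8;
            match value >> 8 {
                1 => Ok(ToyState::Negotiating(flags)),
                2 => Ok(ToyState::AwaitingReady(flags)),
                _ => Err(()),
            }
        }
    }

    #[test]
    fn state_roundtrips() {
        let state = ToyState::AwaitingReady(0b01);
        assert_eq!(ToyState::from_u32(state.to_u32()), Ok(state));
    }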
@@ -7705,8 +7944,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
                        // If we've gotten to the funding stage of the channel, populate the signer with its
                        // required channel parameters.
-                       let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
-                       if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
+                       if channel_state >= ChannelState::FundingNegotiated {
                                holder_signer.provide_channel_parameters(&channel_parameters);
                        }
                        (channel_keys_id, holder_signer)
@@ -7932,7 +8170,7 @@ mod tests {
        use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::ln::channel::InitFeatures;
-       use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
+       use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
        use crate::ln::msgs;
@@ -8105,11 +8343,12 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+               let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
 
                // Put some inbound and outbound HTLCs in A's channel.
                let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
@@ -8233,11 +8472,12 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+               let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
 
                // Now disconnect the two nodes and check that the commitment point in
                // Node B's channel_reestablish message is sane.
@@ -8421,11 +8661,12 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+               let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
 
                // Make sure that receiving a channel update will update the Channel as expected.
                let update = ChannelUpdate {
@@ -9591,11 +9832,8 @@ mod tests {
                                },
                        ]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
-                       tx.clone(),
-                       funding_outpoint,
-                       true,
-                       &&logger,
+               let funding_created_msg = node_a_chan.get_funding_created(
+                       tx.clone(), funding_outpoint, true, &&logger,
                ).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
                        &funding_created_msg.unwrap(),
@@ -9613,12 +9851,10 @@ mod tests {
 
                // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
                // broadcasting the funding transaction until the batch is ready.
-               let _ = node_a_chan.funding_signed(
-                       &funding_signed_msg.unwrap(),
-                       best_block,
-                       &&keys_provider,
-                       &&logger,
-               ).unwrap();
+               let res = node_a_chan.funding_signed(
+                       &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
+               );
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
                let node_a_updates = node_a_chan.monitor_updating_restored(
                        &&logger,
                        &&keys_provider,
@@ -9630,11 +9866,7 @@ mod tests {
                // as the funding transaction depends on all channels in the batch becoming ready.
                assert!(node_a_updates.channel_ready.is_none());
                assert!(node_a_updates.funding_broadcastable.is_none());
-               assert_eq!(
-                       node_a_chan.context.channel_state,
-                       ChannelState::FundingSent as u32 |
-                       ChannelState::WaitingForBatch as u32,
-               );
+               assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
 
                // It is possible to receive a 0conf channel_ready from the remote node.
                node_a_chan.channel_ready(
@@ -9647,18 +9879,12 @@ mod tests {
                ).unwrap();
                assert_eq!(
                        node_a_chan.context.channel_state,
-                       ChannelState::FundingSent as u32 |
-                       ChannelState::WaitingForBatch as u32 |
-                       ChannelState::TheirChannelReady as u32,
+                       ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
                );
 
                // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
                node_a_chan.set_batch_ready();
-               assert_eq!(
-                       node_a_chan.context.channel_state,
-                       ChannelState::FundingSent as u32 |
-                       ChannelState::TheirChannelReady as u32,
-               );
+               assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
                assert!(node_a_chan.check_get_channel_ready(0).is_some());
        }
 }
index a3842285263f604bbda4b46d599db205bcc0cfbb..7c6ee1e429c1d1a325759d22c0a73207fa50d730 100644 (file)
@@ -109,50 +109,79 @@ use crate::ln::script::ShutdownScript;
 // Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
 // our payment, which we can use to decode errors or inform the user that the payment was sent.
 
-/// Routing info for an inbound HTLC onion.
+/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub enum PendingHTLCRouting {
-       /// A forwarded HTLC.
+       /// An HTLC which should be forwarded on to another node.
        Forward {
-               /// BOLT 4 onion packet.
+               /// The onion which should be included in the forwarded HTLC, telling the next hop what to
+               /// do with the HTLC.
                onion_packet: msgs::OnionPacket,
-               /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
-               /// generated using `get_fake_scid` from the scid_utils::fake_scid module.
+               /// The short channel ID of the channel which we were instructed to forward this HTLC to.
+               ///
+               /// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
+               /// to the receiving node, such as one returned from
+               /// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
                short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
                /// Set if this HTLC is being forwarded within a blinded path.
                blinded: Option<BlindedForward>,
        },
-       /// An HTLC paid to an invoice (supposedly) generated by us.
-       /// At this point, we have not checked that the invoice being paid was actually generated by us,
-       /// but rather it's claiming to pay an invoice of ours.
+       /// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
+       ///
+       /// Note that at this point, we have not checked that the invoice being paid was actually
+       /// generated by us, but rather it's claiming to pay an invoice of ours.
        Receive {
-               /// Payment secret and total msat received.
+               /// Information about the amount the sender intended to pay and (potential) proof that this
+               /// is a payment for an invoice we generated. This proof of payment is also used for
+               /// linking MPP parts of a larger payment.
                payment_data: msgs::FinalOnionHopData,
-               /// See [`RecipientOnionFields::payment_metadata`] for more info.
+               /// Additional data which we (allegedly) instructed the sender to include in the onion.
+               ///
+               /// For HTLCs received by LDK, this will ultimately be exposed in
+               /// [`Event::PaymentClaimable::onion_fields`] as
+               /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
                /// CLTV expiry of the received HTLC.
+               ///
                /// Used to track when we should expire pending HTLCs that go unclaimed.
                incoming_cltv_expiry: u32,
-               /// Shared secret derived using a phantom node secret key. If this field is Some, the
-               /// payment was sent to a phantom node (one hop beyond the current node), but can be
-               /// settled by this node.
+               /// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
+               /// provide the onion shared secret used to decrypt the next level of forwarding
+               /// instructions.
                phantom_shared_secret: Option<[u8; 32]>,
-               /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+               /// Custom TLVs which were set by the sender.
+               ///
+               /// For HTLCs received by LDK, this will ultimately be exposed in
+               /// [`Event::PaymentClaimable::onion_fields`] as
+               /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
        },
-       /// Incoming keysend (sender provided the preimage in a TLV).
+       /// The onion indicates that this is a payment to us, with the preimage needed to claim it
+       /// included, and that it is unrelated to any invoice we'd previously generated (aka a
+       /// "keysend" or "spontaneous" payment).
        ReceiveKeysend {
-               /// This was added in 0.0.116 and will break deserialization on downgrades.
+               /// Information about the amount the sender intended to pay and possibly a token to
+               /// associate MPP parts of a larger payment.
+               ///
+               /// This will only be filled in if receiving MPP keysend payments is enabled, and its
+               /// presence will cause deserialization to fail on versions of LDK prior to 0.0.116.
                payment_data: Option<msgs::FinalOnionHopData>,
                /// Preimage for this onion payment. This preimage is provided by the sender and will be
                /// used to settle the spontaneous payment.
                payment_preimage: PaymentPreimage,
-               /// See [`RecipientOnionFields::payment_metadata`] for more info.
+               /// Additional data which we (allegedly) instructed the sender to include in the onion.
+               ///
+               /// For HTLCs received by LDK, this will ultimately bubble back up as
+               /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
                /// CLTV expiry of the received HTLC.
+               ///
                /// Used to track when we should expire pending HTLCs that go unclaimed.
                incoming_cltv_expiry: u32,
-               /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+               /// Custom TLVs which were set by the sender.
+               ///
+               /// For HTLCs received by LDK, these will ultimately bubble back up as
+               /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
        },
 }
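
Each of the three variants documented above drives a different code path once an HTLC's onion is decoded; a heavily pared-down standalone dispatch sketch (stand-in types, not the real PendingHTLCRouting):

    // Pared-down stand-ins for the three routing outcomes described above.
    enum ToyRouting {
        Forward { short_channel_id: u64 },
        Receive { incoming_cltv_expiry: u32 },
        ReceiveKeysend { payment_preimage: [u8; 32] },
    }

    fn describe(routing: &ToyRouting) -> String {
        match routing {
            ToyRouting::Forward { short_channel_id } =>
                format!("forward onward over SCID {}", short_channel_id),
            ToyRouting::Receive { incoming_cltv_expiry } =>
                format!("pays one of our invoices, HTLC expires at height {}", incoming_cltv_expiry),
            ToyRouting::ReceiveKeysend { .. } =>
                "spontaneous payment; the preimage arrived in the onion".to_owned(),
        }
    }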
@@ -179,25 +208,47 @@ impl PendingHTLCRouting {
        }
 }
 
-/// Full details of an incoming HTLC, including routing info.
+/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
+/// should go next.
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub struct PendingHTLCInfo {
        /// Further routing details based on whether the HTLC is being forwarded or received.
        pub routing: PendingHTLCRouting,
-       /// Shared secret from the previous hop.
-       /// Used encrypt failure packets in the event that the HTLC needs to be failed backwards.
+       /// The onion shared secret we built with the sender, used to decrypt the onion.
+       ///
+       /// This is later used to encrypt failure packets in the event that the HTLC is failed.
        pub incoming_shared_secret: [u8; 32],
        /// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
        pub payment_hash: PaymentHash,
-       /// Amount offered by this HTLC.
-       pub incoming_amt_msat: Option<u64>, // Added in 0.0.113
-       /// Sender intended amount to forward or receive (actual amount received
-       /// may overshoot this in either case)
+       /// Amount received in the incoming HTLC.
+       ///
+       /// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
+       /// versions.
+       pub incoming_amt_msat: Option<u64>,
+       /// The amount the sender indicated should be forwarded on to the next hop or, for received
+       /// payments, the amount the sender intended for us to receive.
+       ///
+       /// If the received amount is less than this for received payments, an intermediary hop has
+       /// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
+       /// it along another path).
+       ///
+       /// Because nodes can take less than their required fees, and because senders may wish to
+       /// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
+               /// received payments. In such cases, recipients must handle the HTLC as if they had
+               /// received [`Self::outgoing_amt_msat`].
        pub outgoing_amt_msat: u64,
-       /// Outgoing timelock expiration blockheight.
+       /// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
+       /// should have been set on the received HTLC for received payments).
        pub outgoing_cltv_value: u32,
-       /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
-       /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
+       /// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
+       ///
+               /// If this is a forwarded payment, this is the fee we are taking before forwarding the
+               /// HTLC on.
+       ///
+       /// If this is a received payment, this is the fee that our counterparty took.
+       ///
+       /// This is used to allow LSPs to take fees as a part of payments, without the sender having to
+       /// shoulder them.
        pub skimmed_fee_msat: Option<u64>,
 }
 
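
The outgoing_amt_msat documentation above implies a receiver-side sanity check: fail the HTLC when the delivered amount falls short of what the sender intended. A hedged sketch of that check on bare integers (not LDK's actual validation code):

    // If the amount actually delivered in the incoming HTLC is below the
    // amount the sender intended us to receive, an intermediary skimmed more
    // than allowed and the HTLC should be failed back to the sender.
    fn should_fail_received_htlc(incoming_amt_msat: Option<u64>, outgoing_amt_msat: u64) -> bool {
        match incoming_amt_msat {
            // Objects serialized before LDK 0.0.113 lack the incoming amount,
            // so the check cannot be performed.
            None => false,
            Some(received) => received < outgoing_amt_msat,
        }
    }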
@@ -2663,9 +2714,10 @@ where
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
-               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-               let shutdown_result;
-               loop {
+               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
+               let mut shutdown_result = None;
+
+               {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -2679,11 +2731,9 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let their_features = &peer_state.latest_features;
-                                               let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
+                                               let (shutdown_msg, mut monitor_update_opt, htlcs) =
                                                        chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
                                                failed_htlcs = htlcs;
-                                               shutdown_result = local_shutdown_result;
-                                               debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 
                                                // We can send the `shutdown` message before updating the `ChannelMonitor`
                                                // here as we don't need the monitor update to complete until we send a
@@ -2700,30 +2750,20 @@ where
                                                if let Some(monitor_update) = monitor_update_opt.take() {
                                                        handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
-                                                       break;
                                                }
-
-                                               if chan.is_shutdown() {
-                                                       if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                               msg: channel_update
-                                                                       });
-                                                               }
-                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-                                                       }
-                                               }
-                                               break;
+                                       } else {
+                                               self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
+                                               let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
-                                       // If we reach this point, it means that the channel_id either refers to an unfunded channel or
-                                       // it does not exist for this peer. Either way, we can attempt to force-close it.
-                                       //
-                                       // An appropriate error will be returned for non-existence of the channel if that's the case.
-                                       mem::drop(peer_state_lock);
-                                       mem::drop(per_peer_state);
-                                       return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+                                       return Err(APIError::ChannelUnavailable {
+                                               err: format!(
+                                                       "Channel with id {} not found for the passed counterparty node_id {}",
+                                                       channel_id, counterparty_node_id,
+                                               )
+                                       });
                                },
                        }
                }
@@ -3220,7 +3260,7 @@ where
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
                let logger = WithChannelContext::from(&self.logger, &chan.context);
-               log_trace!(logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+               log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
                let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -3231,7 +3271,7 @@ where
 
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
                let logger = WithChannelContext::from(&self.logger, &chan.context);
-               log_trace!(logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+               log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
                let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
                let enabled = chan.context.is_usable() && match chan.channel_update_status() {
@@ -3679,7 +3719,7 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
-                       Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
+                       Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
                                let logger = WithChannelContext::from(&self.logger, &chan.context);
@@ -3692,7 +3732,7 @@ where
                                                (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
                                        } else { unreachable!(); });
                                match funding_res {
-                                       Ok((chan, funding_msg)) => (chan, funding_msg),
+                                       Ok(funding_msg) => (chan, funding_msg),
                                        Err((chan, err)) => {
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
@@ -3732,7 +3772,7 @@ where
                                if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
                                        panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
-                               e.insert(ChannelPhase::Funded(chan));
+                               e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
                }
                Ok(())
@@ -5378,7 +5418,7 @@ where
                                                // We got a temporary failure updating monitor, but will claim the
                                                // HTLC when the monitor updating is restored (or on chain).
                                                let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
-                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+                                               log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                        } else { errs.push((pk, err)); }
                                }
                        }
@@ -6259,22 +6299,43 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
-                               match chan_phase_entry.get_mut() {
-                                       ChannelPhase::Funded(ref mut chan) => {
-                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
-                                               let monitor = try_chan_phase_entry!(self,
-                                                       chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger), chan_phase_entry);
-                                               if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
-                                                       handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
-                                                       Ok(())
-                                               } else {
-                                                       try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
+                       hash_map::Entry::Occupied(chan_phase_entry) => {
+                               if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
+                                       let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
+                                       let logger = WithContext::from(
+                                               &self.logger,
+                                               Some(chan.context.get_counterparty_node_id()),
+                                               Some(chan.context.channel_id())
+                                       );
+                                       let res =
+                                               chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
+                                       match res {
+                                               Ok((chan, monitor)) => {
+                                                       if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
+                                                               // We really should be able to insert here without doing a second
+                                                               // lookup, but sadly rust stdlib doesn't currently allow keeping
+                                                               // the original Entry around with the value removed.
+                                                               let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
+                                                               if let ChannelPhase::Funded(ref mut chan) = &mut chan {
+                                                                       handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
+                                                               } else { unreachable!(); }
+                                                               Ok(())
+                                                       } else {
+                                                               let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+                                                               return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
+                                                       }
+                                               },
+                                               Err((chan, e)) => {
+                                                       debug_assert!(matches!(e, ChannelError::Close(_)),
+                                                               "We don't have a channel anymore, so the error must require closing the channel");
+                                                       // We've already removed this outbound channel from the map in
+                                                       // `PeerState` above, so at this point we just need to clean up any
+                                                       // lingering entries concerning this channel, which is safe to do now.
+                                                       return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
                                                }
-                                       },
-                                       _ => {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
-                                       },
+                                       }
+                               } else {
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
                                }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
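
The restructured hunk above works around the `Entry` limitation its comment describes: the unfunded channel must be moved out of the map before `funding_signed` can consume it, and on success the funded channel has to be re-inserted via a second lookup. A minimal, self-contained sketch of that remove-promote-reinsert shape (the names and the `u32` payload are illustrative, not LDK's):

    use std::collections::hash_map::{Entry, HashMap};

    enum Phase { UnfundedOutbound(u32), Funded(u32) }

    fn promote(map: &mut HashMap<u8, Phase>, id: u8) -> Result<(), &'static str> {
        match map.entry(id) {
            Entry::Occupied(entry) => {
                if matches!(entry.get(), Phase::UnfundedOutbound(_)) {
                    // Move the channel out of the map so the transition can consume it.
                    let chan = match entry.remove() {
                        Phase::UnfundedOutbound(c) => c,
                        _ => unreachable!(),
                    };
                    // Fallible promotion; on failure the channel stays removed.
                    let funded = chan.checked_add(1).ok_or("funding failed")?;
                    // Second lookup: std's Entry cannot be kept alive across remove().
                    map.insert(id, Phase::Funded(funded));
                    Ok(())
                } else {
                    Err("not an unfunded outbound channel")
                }
            },
            Entry::Vacant(_) => Err("no such channel"),
        }
    }

On failure the entry stays removed, matching the hunk's error path, which converts the error and returns without re-inserting.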
@@ -7010,7 +7071,7 @@ where
                                },
                                hash_map::Entry::Vacant(_) => {
                                        log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
-                                               log_bytes!(msg.channel_id.0));
+                                               msg.channel_id);
                                        // Unfortunately, lnd doesn't force close on errors
                                        // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
                                        // One of the few ways to get an lnd counterparty to force close is by
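
The logging change in the hunk above drops `log_bytes!(msg.channel_id.0)` in favor of formatting the id directly, which works because the id type provides a hex `Display` implementation in this revision. A toy equivalent of that pattern (the real type is `lightning::ln::ChannelId`):

    use std::fmt;

    struct ToyChannelId([u8; 32]);

    impl fmt::Display for ToyChannelId {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // Render as lowercase hex, the way channel ids appear in logs.
            for byte in self.0.iter() { write!(f, "{:02x}", byte)?; }
            Ok(())
        }
    }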
@@ -7200,29 +7261,34 @@ where
 
                let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| {
                        let node_id = phase.context().get_counterparty_node_id();
-                       if let ChannelPhase::Funded(chan) = phase {
-                               let msgs = chan.signer_maybe_unblocked(&self.logger);
-                               if let Some(updates) = msgs.commitment_update {
-                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id,
-                                               updates,
-                                       });
-                               }
-                               if let Some(msg) = msgs.funding_signed {
-                                       pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
-                                               node_id,
-                                               msg,
-                                       });
-                               }
-                               if let Some(msg) = msgs.funding_created {
-                                       pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-                                               node_id,
-                                               msg,
-                                       });
+                       match phase {
+                               ChannelPhase::Funded(chan) => {
+                                       let msgs = chan.signer_maybe_unblocked(&self.logger);
+                                       if let Some(updates) = msgs.commitment_update {
+                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id,
+                                                       updates,
+                                               });
+                                       }
+                                       if let Some(msg) = msgs.funding_signed {
+                                               pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+                                                       node_id,
+                                                       msg,
+                                               });
+                                       }
+                                       if let Some(msg) = msgs.channel_ready {
+                                               send_channel_ready!(self, pending_msg_events, chan, msg);
+                                       }
                                }
-                               if let Some(msg) = msgs.channel_ready {
-                                       send_channel_ready!(self, pending_msg_events, chan, msg);
+                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                       if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
+                                               pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+                                                       node_id,
+                                                       msg,
+                                               });
+                                       }
                                }
+                               ChannelPhase::UnfundedInboundV1(_) => {},
                        }
                };
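
Rewriting the `if let` as an exhaustive `match` lets each channel phase surface exactly the messages a signer unblock can produce: funded channels may emit commitment updates, `funding_signed`, or `channel_ready`, while unfunded outbound channels may now emit `funding_created`. A stripped-down sketch of that dispatch shape (toy types, not LDK's):

    enum ToyPhase {
        Funded { channel_ready: Option<String> },
        UnfundedOutbound { funding_created: Option<String> },
        UnfundedInbound,
    }

    fn unblock(phase: &ToyPhase, queue: &mut Vec<String>) {
        match phase {
            ToyPhase::Funded { channel_ready } => {
                if let Some(msg) = channel_ready { queue.push(msg.clone()); }
            },
            ToyPhase::UnfundedOutbound { funding_created } => {
                if let Some(msg) = funding_created { queue.push(msg.clone()); }
            },
            // An unfunded inbound channel has produced no messages to resend yet.
            ToyPhase::UnfundedInbound => {},
        }
    }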
 
@@ -10448,7 +10514,7 @@ where
                                                                hash_map::Entry::Occupied(mut entry) => {
                                                                        let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
                                                                        log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
-                                                                               if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
+                                                                               if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), htlc.payment_hash);
                                                                },
                                                                hash_map::Entry::Vacant(entry) => {
                                                                        let path_fee = path.fee_msat();
@@ -10734,8 +10800,7 @@ where
                                                        }
                                                }
                                                if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
-                                                       let logger = WithChannelMonitor::from(&args.logger, previous_hop_monitor);
-                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &&logger);
+                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
                                                }
                                        }
                                        pending_events_read.push_back((events::Event::PaymentClaimed {
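
The simplification above passes `&args.logger` straight through instead of wrapping it in `WithChannelMonitor` at the call site, presumably because the monitor now derives its per-channel logging context internally. A toy version of that decorate-at-the-callee pattern (hypothetical names, not LDK's `Logger` trait):

    trait ToyLogger { fn log(&self, msg: &str); }

    struct Plain;
    impl ToyLogger for Plain {
        fn log(&self, msg: &str) { println!("{}", msg); }
    }

    // The callee builds its own context wrapper instead of every call site doing so.
    struct WithChanContext<'a, L: ToyLogger> { inner: &'a L, channel: &'a str }
    impl<'a, L: ToyLogger> ToyLogger for WithChanContext<'a, L> {
        fn log(&self, msg: &str) {
            self.inner.log(&format!("[{}] {}", self.channel, msg));
        }
    }

    fn provide_preimage<L: ToyLogger>(base: &L, channel_id: &str) {
        let logger = WithChanContext { inner: base, channel: channel_id };
        logger.log("Providing payment preimage");
    }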
index a41cf306880d73220742168f64a4258327b5043b..fee3a3b399475d885ee1ac7996f9f246c771eb81 100644 (file)
@@ -9059,7 +9059,7 @@ fn test_duplicate_chan_id() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
        create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
 
-       let (_, funding_created) = {
+       let funding_created = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                // Once we call `get_funding_created` the channel has a duplicate channel_id as
@@ -9067,7 +9067,7 @@ fn test_duplicate_chan_id() {
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state).
                match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
-                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                       ChannelPhase::UnfundedOutboundV1(mut chan) => {
                                let logger = test_utils::TestLogger::new();
                                chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
                        },
index 2d871b354a26d5643bece3211ddaa9986978bebd..b877565e01725bf37e402ddd8ed5985418d982e3 100644 (file)
@@ -52,7 +52,7 @@ use core::fmt::Display;
 use crate::io::{self, Cursor, Read};
 use crate::io_extras::read_to_end;
 
-use crate::events::MessageSendEventsProvider;
+use crate::events::{EventsProvider, MessageSendEventsProvider};
 use crate::util::chacha20poly1305rfc::ChaChaPolyReadAdapter;
 use crate::util::logger;
 use crate::util::ser::{LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
@@ -1631,7 +1631,7 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
 }
 
 /// A handler for received [`OnionMessage`]s and for providing generated ones to send.
-pub trait OnionMessageHandler {
+pub trait OnionMessageHandler: EventsProvider {
        /// Handle an incoming `onion_message` message from the given peer.
        fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage);
 
@@ -1650,6 +1650,10 @@ pub trait OnionMessageHandler {
        /// drop and refuse to forward onion messages to this peer.
        fn peer_disconnected(&self, their_node_id: &PublicKey);
 
+       /// Performs actions that should happen roughly every ten seconds after startup. Allows handlers
+       /// to drop any buffered onion messages intended for prospective peers.
+       fn timer_tick_occurred(&self);
+
        // Handler information:
        /// Gets the node feature flags which this handler itself supports. All available handlers are
        /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
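
A no-op implementation of the new method is enough for handlers with nothing to expire (see `IgnoringMessageHandler` below). Something outside the handler must still supply the roughly-ten-second cadence the doc describes; a minimal std-only sketch of such a driver loop (in LDK the background processor plays this role):

    use std::time::{Duration, Instant};

    fn drive_timer(tick: impl Fn(), mut should_stop: impl FnMut() -> bool) {
        let mut last = Instant::now();
        while !should_stop() {
            if last.elapsed() >= Duration::from_secs(10) {
                tick(); // e.g. onion_message_handler.timer_tick_occurred()
                last = Instant::now();
            }
            std::thread::sleep(Duration::from_millis(100));
        }
    }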
index ca15a37c072d89625a0a235fb226d170f758cdb4..06a9ae07e1e0024c075082711d9064278227cf5a 100644 (file)
@@ -1,6 +1,7 @@
-//! Utilities for channelmanager.rs
+//! Utilities to decode payment onions and do contextless validation of incoming payments.
 //!
-//! Includes a public [`peel_payment_onion`] function for use by external projects or libraries.
+//! Primarily features [`peel_payment_onion`], which decodes a payment onion statelessly
+//! and can be used to predict whether we'd accept the payment.
 
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -225,7 +226,9 @@ pub(super) fn create_recv_pending_htlc_info(
        })
 }
 
-/// Peel one layer off an incoming onion, returning [`PendingHTLCInfo`] (either Forward or Receive).
+/// Peel one layer off an incoming onion, returning a [`PendingHTLCInfo`] that contains information
+/// about the intended next-hop for the HTLC.
+///
 /// This does all the relevant context-free checks that LDK requires for payment relay or
 /// acceptance. If the payment is to be received, and the amount matches the expected amount for
 /// a given invoice, this indicates the [`msgs::UpdateAddHTLC`], once fully committed in the
@@ -234,7 +237,7 @@ pub(super) fn create_recv_pending_htlc_info(
 /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
 pub fn peel_payment_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
        msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
-       cur_height: u32, accept_mpp_keysend: bool,
+       cur_height: u32, accept_mpp_keysend: bool, allow_skimmed_fees: bool,
 ) -> Result<PendingHTLCInfo, InboundOnionErr>
 where
        NS::Target: NodeSigner,
@@ -273,6 +276,10 @@ where
                                        err_data: Vec::new(),
                                });
                        }
+
+                       // TODO: If this is potentially a phantom payment we should decode the phantom payment
+                       // onion here and check it.
+
                        create_fwd_pending_htlc_info(
                                msg, next_hop_data, next_hop_hmac, new_packet_bytes, shared_secret,
                                Some(next_packet_pubkey)
@@ -281,7 +288,7 @@ where
                onion_utils::Hop::Receive(received_data) => {
                        create_recv_pending_htlc_info(
                                received_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry,
-                               None, false, msg.skimmed_fee_msat, cur_height, accept_mpp_keysend,
+                               None, allow_skimmed_fees, msg.skimmed_fee_msat, cur_height, accept_mpp_keysend,
                        )?
                }
        })
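
The new `allow_skimmed_fees` flag is threaded into `create_recv_pending_htlc_info` in place of the hardcoded `false`, letting a receiver tolerate an HTLC that arrives short of the onion's stated amount because an intermediary skimmed part of the fee. A toy approximation of that acceptance rule (illustrative only, not LDK's exact check; the real logic lives in `create_recv_pending_htlc_info`):

    fn underpayment_acceptable(
        onion_amt_msat: u64, delivered_msat: u64, skimmed_fee_msat: u64,
        allow_skimmed_fees: bool,
    ) -> bool {
        if delivered_msat >= onion_amt_msat { return true; }
        // Tolerate a shortfall of at most the declared skimmed fee, if allowed.
        allow_skimmed_fees
            && delivered_msat.saturating_add(skimmed_fee_msat) >= onion_amt_msat
    }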
@@ -477,7 +484,7 @@ mod tests {
                let msg = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, onion);
                let logger = test_utils::TestLogger::with_id("bob".to_string());
 
-               let peeled = peel_payment_onion(&msg, &&bob, &&logger, &secp_ctx, cur_height, true)
+               let peeled = peel_payment_onion(&msg, &&bob, &&logger, &secp_ctx, cur_height, true, false)
                        .map_err(|e| e.msg).unwrap();
 
                let next_onion = match peeled.routing {
@@ -488,7 +495,7 @@ mod tests {
                };
 
                let msg2 = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, next_onion);
-               let peeled2 = peel_payment_onion(&msg2, &&charlie, &&logger, &secp_ctx, cur_height, true)
+               let peeled2 = peel_payment_onion(&msg2, &&charlie, &&logger, &secp_ctx, cur_height, true, false)
                        .map_err(|e| e.msg).unwrap();
 
                match peeled2.routing {
index f061772890bf4c168a1735ce1a8c1527ae3431d0..6a935acf00d7b6cbb1cebbf4e633db05d65db3b0 100644 (file)
@@ -19,7 +19,7 @@ use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
 use crate::sign::{KeysManager, NodeSigner, Recipient};
-use crate::events::{MessageSendEvent, MessageSendEventsProvider};
+use crate::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
@@ -89,6 +89,9 @@ pub trait CustomMessageHandler: wire::CustomMessageReader {
 /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
 /// or doing any processing. You can provide one of these as the route_handler in a MessageHandler.
 pub struct IgnoringMessageHandler{}
+impl EventsProvider for IgnoringMessageHandler {
+       fn process_pending_events<H: Deref>(&self, _handler: H) where H::Target: EventHandler {}
+}
 impl MessageSendEventsProvider for IgnoringMessageHandler {
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Vec::new() }
 }
@@ -115,6 +118,7 @@ impl OnionMessageHandler for IgnoringMessageHandler {
        fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
        fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
+       fn timer_tick_occurred(&self) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
                InitFeatures::empty()
@@ -680,6 +684,8 @@ pub trait APeerManager {
        type NS: Deref<Target=Self::NST>;
        /// Gets a reference to the underlying [`PeerManager`].
        fn as_ref(&self) -> &PeerManager<Self::Descriptor, Self::CM, Self::RM, Self::OM, Self::L, Self::CMH, Self::NS>;
+       /// Returns the peer manager's [`OnionMessageHandler`].
+       fn onion_message_handler(&self) -> &Self::OMT;
 }
 
 impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref>
@@ -705,6 +711,9 @@ APeerManager for PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
        type NST = <NS as Deref>::Target;
        type NS = NS;
        fn as_ref(&self) -> &PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> { self }
+       fn onion_message_handler(&self) -> &Self::OMT {
+               self.message_handler.onion_message_handler.deref()
+       }
 }
 
 /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
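
Together with the `OnionMessageHandler::timer_tick_occurred` method added in msgs.rs above, this accessor lets code that only holds an `APeerManager` drive the onion message timer. A minimal sketch, assuming this revision bounds `OMT: OnionMessageHandler`:

    use lightning::ln::msgs::OnionMessageHandler;
    use lightning::ln::peer_handler::APeerManager;

    fn tick_onion_message_handler<PM: APeerManager>(peer_manager: &PM) {
        peer_manager.onion_message_handler().timer_tick_occurred();
    }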
@@ -1286,8 +1295,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                let mut read_pos = 0;
                                while read_pos < data.len() {
                                        macro_rules! try_potential_handleerror {
-                                               ($peer: expr, $thing: expr) => {
-                                                       match $thing {
+                                               ($peer: expr, $thing: expr) => {{
+                                                       let res = $thing;
+                                                       let logger = WithContext::from(&self.logger, peer_node_id.map(|(id, _)| id), None);
+                                                       match res {
                                                                Ok(x) => x,
                                                                Err(e) => {
                                                                        match e.action {
@@ -1297,7 +1308,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                        // re-entrant code and possibly unexpected behavior. The
                                                                                        // message send is optimistic anyway, and in this case
                                                                                        // we immediately disconnect the peer.
-                                                                                       log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        return Err(PeerHandleError { });
                                                                                },
                                                                                msgs::ErrorAction::DisconnectPeerWithWarning { .. } => {
@@ -1306,32 +1317,32 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                        // re-entrant code and possibly unexpected behavior. The
                                                                                        // message send is optimistic anyway, and in this case
                                                                                        // we immediately disconnect the peer.
-                                                                                       log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        return Err(PeerHandleError { });
                                                                                },
                                                                                msgs::ErrorAction::IgnoreAndLog(level) => {
-                                                                                       log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_given_level!(logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        continue
                                                                                },
                                                                                msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
                                                                                msgs::ErrorAction::IgnoreError => {
-                                                                                       log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        continue;
                                                                                },
                                                                                msgs::ErrorAction::SendErrorMessage { msg } => {
-                                                                                       log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        self.enqueue_message($peer, &msg);
                                                                                        continue;
                                                                                },
                                                                                msgs::ErrorAction::SendWarningMessage { msg, log_level } => {
-                                                                                       log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        self.enqueue_message($peer, &msg);
                                                                                        continue;
                                                                                },
                                                                        }
                                                                }
                                                        }
-                                               }
+                                               }}
                                        }
 
                                        let mut peer_lock = peer_mutex.lock().unwrap();
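
The macro body now declares locals (`res` and the context-aware `logger`) before matching, which is why its expansion gains a second set of braces: a `macro_rules!` arm that introduces bindings must expand to a block expression. A toy demonstration of the same shape:

    macro_rules! try_thing {
        ($thing: expr) => {{
            // Outer braces make this a block expression, so locals are allowed.
            let res = $thing;
            match res {
                Ok(x) => x,
                Err(e) => return Err(e),
            }
        }}
    }

    fn demo() -> Result<u8, ()> {
        let v: u8 = try_thing!(Ok::<u8, ()>(7));
        Ok(v)
    }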
@@ -2005,25 +2016,29 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendStfu { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendStfu event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendSplice { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendSplice event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendSplice event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendSpliceAck { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendSpliceLocked { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
index 308211d0b370285e509c6885a60de084ba214d06..bdd32e90d0ace1cca8b1fd46c4eac99ecb486b61 100644 (file)
@@ -277,6 +277,21 @@ fn shutdown_on_unfunded_channel() {
        check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyCoopClosedUnfundedChannel, [nodes[1].node.get_our_node_id()], 1_000_000);
 }
 
+#[test]
+fn close_on_unfunded_channel() {
+       // Test the user asking us to close prior to funding generation
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 100_000, 0, None, None).unwrap();
+       let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000);
+}
+
 #[test]
 fn expect_channel_shutdown_state_with_force_closure() {
        // Test sending a shutdown prior to channel_ready after funding generation
index c43b218df7b16015364966ed07882b79d29db325..9323be5fff2d36b2d586a9ed60948d71993daa64 100644 (file)
@@ -10,8 +10,9 @@
 //! Onion message testing and test utilities live here.
 
 use crate::blinded_path::BlindedPath;
+use crate::events::{Event, EventsProvider};
 use crate::ln::features::InitFeatures;
-use crate::ln::msgs::{self, DecodeError, OnionMessageHandler};
+use crate::ln::msgs::{self, DecodeError, OnionMessageHandler, SocketAddress};
 use crate::sign::{NodeSigner, Recipient};
 use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
 use crate::util::test_utils;
@@ -28,10 +29,11 @@ use crate::sync::{Arc, Mutex};
 use crate::prelude::*;
 
 struct MessengerNode {
-       keys_manager: Arc<test_utils::TestKeysInterface>,
+       node_id: PublicKey,
+       entropy_source: Arc<test_utils::TestKeysInterface>,
        messenger: OnionMessenger<
                Arc<test_utils::TestKeysInterface>,
-               Arc<test_utils::TestKeysInterface>,
+               Arc<test_utils::TestNodeSigner>,
                Arc<test_utils::TestLogger>,
                Arc<TestMessageRouter>,
                Arc<TestOffersMessageHandler>,
@@ -40,12 +42,6 @@ struct MessengerNode {
        custom_message_handler: Arc<TestCustomMessageHandler>,
 }
 
-impl MessengerNode {
-       fn get_node_pk(&self) -> PublicKey {
-               self.keys_manager.get_node_id(Recipient::Node).unwrap()
-       }
-}
-
 struct TestMessageRouter {}
 
 impl MessageRouter for TestMessageRouter {
@@ -55,6 +51,8 @@ impl MessageRouter for TestMessageRouter {
                Ok(OnionMessagePath {
                        intermediate_nodes: vec![],
                        destination,
+                       first_node_addresses:
+                               Some(vec![SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }]),
                })
        }
 }
@@ -155,44 +153,69 @@ impl CustomOnionMessageHandler for TestCustomMessageHandler {
 }
 
 fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
+       let secrets = (1..=num_messengers)
+               .map(|i| SecretKey::from_slice(&[i; 32]).unwrap())
+               .collect();
+       create_nodes_using_secrets(secrets)
+}
+
+fn create_nodes_using_secrets(secrets: Vec<SecretKey>) -> Vec<MessengerNode> {
        let mut nodes = Vec::new();
-       for i in 0..num_messengers {
+       for (i, secret_key) in secrets.into_iter().enumerate() {
                let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
                let seed = [i as u8; 32];
-               let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
+               let entropy_source = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
+               let node_signer = Arc::new(test_utils::TestNodeSigner::new(secret_key));
+
                let message_router = Arc::new(TestMessageRouter {});
                let offers_message_handler = Arc::new(TestOffersMessageHandler {});
                let custom_message_handler = Arc::new(TestCustomMessageHandler::new());
                nodes.push(MessengerNode {
-                       keys_manager: keys_manager.clone(),
+                       node_id: node_signer.get_node_id(Recipient::Node).unwrap(),
+                       entropy_source: entropy_source.clone(),
                        messenger: OnionMessenger::new(
-                               keys_manager.clone(), keys_manager, logger.clone(), message_router,
+                               entropy_source, node_signer, logger.clone(), message_router,
                                offers_message_handler, custom_message_handler.clone()
                        ),
                        custom_message_handler,
                });
        }
-       for idx in 0..num_messengers - 1 {
-               let i = idx as usize;
-               let mut features = InitFeatures::empty();
-               features.set_onion_messages_optional();
-               let init_msg = msgs::Init { features, networks: None, remote_network_address: None };
-               nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone(), true).unwrap();
-               nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone(), false).unwrap();
+       for i in 0..nodes.len() - 1 {
+               connect_peers(&nodes[i], &nodes[i + 1]);
        }
        nodes
 }
 
+fn connect_peers(node_a: &MessengerNode, node_b: &MessengerNode) {
+       let mut features = InitFeatures::empty();
+       features.set_onion_messages_optional();
+       let init_msg = msgs::Init { features, networks: None, remote_network_address: None };
+       node_a.messenger.peer_connected(&node_b.node_id, &init_msg.clone(), true).unwrap();
+       node_b.messenger.peer_connected(&node_a.node_id, &init_msg.clone(), false).unwrap();
+}
+
+fn disconnect_peers(node_a: &MessengerNode, node_b: &MessengerNode) {
+       node_a.messenger.peer_disconnected(&node_b.node_id);
+       node_b.messenger.peer_disconnected(&node_a.node_id);
+}
+
+fn release_events(node: &MessengerNode) -> Vec<Event> {
+       let events = core::cell::RefCell::new(Vec::new());
+       node.messenger.process_pending_events(&|e| events.borrow_mut().push(e));
+       events.into_inner()
+}
+
 fn pass_along_path(path: &Vec<MessengerNode>) {
        let mut prev_node = &path[0];
        for node in path.into_iter().skip(1) {
                let events = prev_node.messenger.release_pending_msgs();
                let onion_msg =  {
-                       let msgs = events.get(&node.get_node_pk()).unwrap();
+                       let msgs = events.get(&node.node_id).unwrap();
                        assert_eq!(msgs.len(), 1);
                        msgs[0].clone()
                };
-               node.messenger.handle_onion_message(&prev_node.get_node_pk(), &onion_msg);
+               node.messenger.handle_onion_message(&prev_node.node_id, &onion_msg);
                prev_node = node;
        }
 }
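
A note on the `release_events` helper above: `process_pending_events` takes the handler by shared reference, so collecting the events into a `Vec` needs interior mutability, hence the `RefCell`. A self-contained sketch of the same trick:

    use core::cell::RefCell;

    fn collect_with_shared_handler(emit: impl Fn(&dyn Fn(u32))) -> Vec<u32> {
        let out = RefCell::new(Vec::new());
        // The handler only gets a shared reference, so mutate through the RefCell.
        emit(&|e| out.borrow_mut().push(e));
        out.into_inner()
    }

For example, collect_with_shared_handler(|handler| { handler(1); handler(2); }) yields vec![1, 2].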
@@ -204,9 +227,10 @@ fn one_unblinded_hop() {
 
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
-               destination: Destination::Node(nodes[1].get_node_pk()),
+               destination: Destination::Node(nodes[1].node_id),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -217,10 +241,11 @@ fn two_unblinded_hops() {
        let test_msg = TestCustomMessage::Response;
 
        let path = OnionMessagePath {
-               intermediate_nodes: vec![nodes[1].get_node_pk()],
-               destination: Destination::Node(nodes[2].get_node_pk()),
+               intermediate_nodes: vec![nodes[1].node_id],
+               destination: Destination::Node(nodes[2].node_id),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -231,12 +256,13 @@ fn one_blinded_hop() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk()], &*nodes[1].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -247,13 +273,14 @@ fn two_unblinded_two_blinded() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[3].get_node_pk(), nodes[4].get_node_pk()], &*nodes[4].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[3].node_id, nodes[4].node_id], &*nodes[4].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
-               intermediate_nodes: vec![nodes[1].get_node_pk(), nodes[2].get_node_pk()],
+               intermediate_nodes: vec![nodes[1].node_id, nodes[2].node_id],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
 
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[4].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -264,13 +291,14 @@ fn three_blinded_hops() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
 
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[3].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -281,13 +309,14 @@ fn too_big_packet_error() {
        let nodes = create_nodes(2);
        let test_msg = TestCustomMessage::Response;
 
-       let hop_node_id = nodes[1].get_node_pk();
+       let hop_node_id = nodes[1].node_id;
        let hops = vec![hop_node_id; 400];
        let path = OnionMessagePath {
                intermediate_nodes: hops,
                destination: Destination::Node(hop_node_id),
+               first_node_addresses: None,
        };
-       let err = nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
        assert_eq!(err, SendError::TooBigPacket);
 }
 
@@ -299,23 +328,25 @@ fn we_are_intro_node() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[0].get_node_pk(), nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
 
-       nodes[0].messenger.send_onion_message(path, test_msg.clone(), None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap();
        nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 
        // Try with a two-hop blinded path where we are the introduction node.
-       let blinded_path = BlindedPath::new_for_message(&[nodes[0].get_node_pk(), nodes[1].get_node_pk()], &*nodes[1].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
        nodes.remove(2);
        pass_along_path(&nodes);
@@ -329,13 +360,14 @@ fn invalid_blinded_path_error() {
 
        // 0 hops
        let secp_ctx = Secp256k1::new();
-       let mut blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
+       let mut blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
        blinded_path.blinded_hops.clear();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       let err = nodes[0].messenger.send_onion_message(path, test_msg.clone(), None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap_err();
        assert_eq!(err, SendError::TooFewBlindedHops);
 }
 
@@ -347,11 +379,12 @@ fn reply_path() {
 
        // Destination::Node
        let path = OnionMessagePath {
-               intermediate_nodes: vec![nodes[1].get_node_pk(), nodes[2].get_node_pk()],
-               destination: Destination::Node(nodes[3].get_node_pk()),
+               intermediate_nodes: vec![nodes[1].node_id, nodes[2].node_id],
+               destination: Destination::Node(nodes[3].node_id),
+               first_node_addresses: None,
        };
-       let reply_path = BlindedPath::new_for_message(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
-       nodes[0].messenger.send_onion_message(path, test_msg.clone(), Some(reply_path)).unwrap();
+       let reply_path = BlindedPath::new_for_message(&[nodes[2].node_id, nodes[1].node_id, nodes[0].node_id], &*nodes[0].entropy_source, &secp_ctx).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), Some(reply_path)).unwrap();
        nodes[3].custom_message_handler.expect_message(TestCustomMessage::Request);
        pass_along_path(&nodes);
        // Make sure the last node successfully decoded the reply path.
@@ -360,14 +393,15 @@ fn reply_path() {
        pass_along_path(&nodes);
 
        // Destination::BlindedPath
-       let blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       let reply_path = BlindedPath::new_for_message(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
+       let reply_path = BlindedPath::new_for_message(&[nodes[2].node_id, nodes[1].node_id, nodes[0].node_id], &*nodes[0].entropy_source, &secp_ctx).unwrap();
 
-       nodes[0].messenger.send_onion_message(path, test_msg, Some(reply_path)).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, Some(reply_path)).unwrap();
        nodes[3].custom_message_handler.expect_message(TestCustomMessage::Request);
        pass_along_path(&nodes);
 
@@ -397,9 +431,10 @@ fn invalid_custom_message_type() {
        let test_msg = InvalidCustomMessage {};
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
-               destination: Destination::Node(nodes[1].get_node_pk()),
+               destination: Destination::Node(nodes[1].node_id),
+               first_node_addresses: None,
        };
-       let err = nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
        assert_eq!(err, SendError::InvalidMessage);
 }
 
@@ -409,12 +444,13 @@ fn peer_buffer_full() {
        let test_msg = TestCustomMessage::Request;
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
-               destination: Destination::Node(nodes[1].get_node_pk()),
+               destination: Destination::Node(nodes[1].node_id),
+               first_node_addresses: None,
        };
        for _ in 0..188 { // Based on MAX_PER_PEER_BUFFER_SIZE in OnionMessenger
-               nodes[0].messenger.send_onion_message(path.clone(), test_msg.clone(), None).unwrap();
+               nodes[0].messenger.send_onion_message_using_path(path.clone(), test_msg.clone(), None).unwrap();
        }
-       let err = nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
        assert_eq!(err, SendError::BufferFull);
 }
 
@@ -428,51 +464,97 @@ fn many_hops() {
 
        let mut intermediate_nodes = vec![];
        for i in 1..(num_nodes-1) {
-               intermediate_nodes.push(nodes[i].get_node_pk());
+               intermediate_nodes.push(nodes[i].node_id);
        }
 
        let path = OnionMessagePath {
                intermediate_nodes,
-               destination: Destination::Node(nodes[num_nodes-1].get_node_pk()),
+               destination: Destination::Node(nodes[num_nodes-1].node_id),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[num_nodes-1].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
 
 #[test]
-fn spec_test_vector() {
-       let keys_mgrs = vec![
-               (Arc::new(test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet)), // Alice
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4141414141414141414141414141414141414141414141414141414141414141").unwrap()).unwrap()))),
-               (Arc::new(test_utils::TestKeysInterface::new(&[1; 32], Network::Testnet)), // Bob
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4242424242424242424242424242424242424242424242424242424242424242").unwrap()).unwrap()))),
-               (Arc::new(test_utils::TestKeysInterface::new(&[2; 32], Network::Testnet)), // Carol
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4343434343434343434343434343434343434343434343434343434343434343").unwrap()).unwrap()))),
-               (Arc::new(test_utils::TestKeysInterface::new(&[3; 32], Network::Testnet)), // Dave
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4444444444444444444444444444444444444444444444444444444444444444").unwrap()).unwrap()))),
-       ];
-       let message_router = Arc::new(TestMessageRouter {});
-       let offers_message_handler = Arc::new(TestOffersMessageHandler {});
-       let custom_message_handler = Arc::new(TestCustomMessageHandler::new());
-       let mut nodes = Vec::new();
-       for (idx, (entropy_source, node_signer)) in keys_mgrs.iter().enumerate() {
-               let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", idx)));
-               nodes.push(OnionMessenger::new(
-                       entropy_source.clone(), node_signer.clone(), logger.clone(), message_router.clone(),
-                       offers_message_handler.clone(), custom_message_handler.clone()
-               ));
+fn requests_peer_connection_for_buffered_messages() {
+       let nodes = create_nodes(3);
+       let message = TestCustomMessage::Request;
+       let secp_ctx = Secp256k1::new();
+       let blinded_path = BlindedPath::new_for_message(
+               &[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
+       ).unwrap();
+       let destination = Destination::BlindedPath(blinded_path);
+
+       // Buffer an onion message for a connected peer
+       nodes[0].messenger.send_onion_message(message.clone(), destination.clone(), None).unwrap();
+       assert!(release_events(&nodes[0]).is_empty());
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_some());
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+
+       // Buffer an onion message for a disconnected peer
+       disconnect_peers(&nodes[0], &nodes[1]);
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+       nodes[0].messenger.send_onion_message(message, destination, None).unwrap();
+
+       // Check that a ConnectionNeeded event for the peer is provided
+       let events = release_events(&nodes[0]);
+       assert_eq!(events.len(), 1);
+       match &events[0] {
+               Event::ConnectionNeeded { node_id, .. } => assert_eq!(*node_id, nodes[1].node_id),
+               e => panic!("Unexpected event: {:?}", e),
+       }
+
+       // Release the buffered onion message when reconnected
+       connect_peers(&nodes[0], &nodes[1]);
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_some());
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+}
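
The application is expected to react to `Event::ConnectionNeeded` by dialing the peer so the buffered message can be released, as the reconnect at the end of the test shows. A sketch of the application-side extraction (the `addresses` field name is assumed from this revision's event definition):

    use bitcoin::secp256k1::PublicKey;
    use lightning::events::Event;
    use lightning::ln::msgs::SocketAddress;

    // Pull out the peer and candidate addresses so the networking layer can dial.
    fn connection_needed(event: &Event) -> Option<(PublicKey, Vec<SocketAddress>)> {
        match event {
            Event::ConnectionNeeded { node_id, addresses } =>
                Some((*node_id, addresses.clone())),
            _ => None,
        }
    }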
+
+#[test]
+fn drops_buffered_messages_waiting_for_peer_connection() {
+       let nodes = create_nodes(3);
+       let message = TestCustomMessage::Request;
+       let secp_ctx = Secp256k1::new();
+       let blinded_path = BlindedPath::new_for_message(
+               &[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
+       ).unwrap();
+       let destination = Destination::BlindedPath(blinded_path);
+
+       // Buffer an onion message for a disconnected peer
+       disconnect_peers(&nodes[0], &nodes[1]);
+       nodes[0].messenger.send_onion_message(message, destination, None).unwrap();
+
+       // Release the event so the timer can start ticking
+       let events = release_events(&nodes[0]);
+       assert_eq!(events.len(), 1);
+       match &events[0] {
+               Event::ConnectionNeeded { node_id, .. } => assert_eq!(*node_id, nodes[1].node_id),
+               e => panic!("Unexpected event: {:?}", e),
        }
-       for idx in 0..nodes.len() - 1 {
-               let i = idx as usize;
-               let mut features = InitFeatures::empty();
-               features.set_onion_messages_optional();
-               let init_msg = msgs::Init { features, networks: None, remote_network_address: None };
-               nodes[i].peer_connected(
-                       &keys_mgrs[i + 1].1.get_node_id(Recipient::Node).unwrap(), &init_msg.clone(), true).unwrap();
-               nodes[i + 1].peer_connected(
-                       &keys_mgrs[i].1.get_node_id(Recipient::Node).unwrap(), &init_msg.clone(), false).unwrap();
+
+       // Drop buffered messages for a disconnected peer after some timer ticks
+       use crate::onion_message::messenger::MAX_TIMER_TICKS;
+       for _ in 0..=MAX_TIMER_TICKS {
+               nodes[0].messenger.timer_tick_occurred();
        }
+       connect_peers(&nodes[0], &nodes[1]);
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+}
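
The drop behavior exercised here gives messages buffered for a not-yet-connected peer a budget of `MAX_TIMER_TICKS`; once `timer_tick_occurred` has fired enough times, the whole buffer is discarded. A toy model of that countdown (illustrative, not the messenger's actual types):

    struct PendingConnection {
        buffered: Vec<Vec<u8>>,   // serialized onion messages awaiting the peer
        ticks_remaining: u8,      // starts at the tick budget, e.g. MAX_TIMER_TICKS
    }

    impl PendingConnection {
        // Returns false once the budget is spent; the caller then drops `buffered`.
        fn timer_tick_occurred(&mut self) -> bool {
            if self.ticks_remaining == 0 { return false; }
            self.ticks_remaining -= 1;
            true
        }
    }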
+
+#[test]
+fn spec_test_vector() {
+       let secret_keys = [
+               "4141414141414141414141414141414141414141414141414141414141414141", // Alice
+               "4242424242424242424242424242424242424242424242424242424242424242", // Bob
+               "4343434343434343434343434343434343434343434343434343434343434343", // Carol
+               "4444444444444444444444444444444444444444444444444444444444444444", // Dave
+       ]
+               .iter()
+               .map(|secret| SecretKey::from_slice(&<Vec<u8>>::from_hex(secret).unwrap()).unwrap())
+               .collect();
+       let nodes = create_nodes_using_secrets(secret_keys);
 
        // Hardcode the sender->Alice onion message, because it includes an unknown TLV of type 1, which
        // LDK doesn't support constructing.
@@ -491,24 +573,18 @@ fn spec_test_vector() {
        // which is why the asserted strings differ slightly from the spec.
        assert_eq!(sender_to_alice_om.encode(), <Vec<u8>>::from_hex("031195a8046dcbb8e17034bca630065e7a0982e4e36f6f7e5a8d4554e4846fcd9905560002531fe6068134503d2723133227c867ac8fa6c83c537e9a44c3c5bdbdcb1fe33793b828776d70aabbd8cef1a5b52d5a397ae1a20f20435ff6057cd8be339d5aee226660ef73b64afa45dbf2e6e8e26eb96a259b2db5aeecda1ce2e768bbc35d389d7f320ca3d2bd14e2689bef2f5ac0307eaaabc1924eb972c1563d4646ae131accd39da766257ed35ea36e4222527d1db4fa7b2000aab9eafcceed45e28b5560312d4e2299bd8d1e7fe27d10925966c28d497aec400b4630485e82efbabc00550996bdad5d6a9a8c75952f126d14ad2cff91e16198691a7ef2937de83209285f1fb90944b4e46bca7c856a9ce3da10cdf2a7d00dc2bf4f114bc4d3ed67b91cbde558ce9af86dc81fbdc37f8e301b29e23c1466659c62bdbf8cff5d4c20f0fb0851ec72f5e9385dd40fdd2e3ed67ca4517117825665e50a3e26f73c66998daf18e418e8aef9ce2d20da33c3629db2933640e03e7b44c2edf49e9b482db7b475cfd4c617ae1d46d5c24d697846f9f08561eac2b065f9b382501f6eabf07343ed6c602f61eab99cdb52adf63fd44a8db2d3016387ea708fc1c08591e19b4d9984ebe31edbd684c2ea86526dd8c7732b1d8d9117511dc1b643976d356258fce8313b1cb92682f41ab72dedd766f06de375f9edacbcd0ca8c99b865ea2b7952318ea1fd20775a28028b5cf59dece5de14f615b8df254eee63493a5111ea987224bea006d8f1b60d565eef06ac0da194dba2a6d02e79b2f2f34e9ca6e1984a507319d86e9d4fcaeea41b4b9144e0b1826304d4cc1da61cfc5f8b9850697df8adc5e9d6f3acb3219b02764b4909f2b2b22e799fd66c383414a84a7d791b899d4aa663770009eb122f90282c8cb9cda16aba6897edcf9b32951d0080c0f52be3ca011fbec3fb16423deb47744645c3b05fdbd932edf54ba6efd26e65340a8e9b1d1216582e1b30d64524f8ca2d6c5ba63a38f7120a3ed71bed8960bcac2feee2dd41c90be48e3c11ec518eb3d872779e4765a6cc28c6b0fa71ab57ced73ae963cc630edae4258cba2bf25821a6ae049fec2fca28b5dd1bb004d92924b65701b06dcf37f0ccd147a13a03f9bc0f98b7d78fe9058089756931e2cd0e0ed92ec6759d07b248069526c67e9e6ce095118fd3501ba0f858ef030b76c6f6beb11a09317b5ad25343f4b31aef02bc555951bc7791c2c289ecf94d5544dcd6ad3021ed8e8e3db34b2a73e1eedb57b578b068a5401836d6e382110b73690a94328c404af25e85a8d6b808893d1b71af6a31fadd8a8cc6e31ecc0d9ff7e6b91fd03c274a5c1f1ccd25b61150220a3fddb04c91012f5f7a83a5c90deb2470089d6e38cd5914b9c946eca6e9d31bbf8667d36cf87effc3f3ff283c21dd4137bd569fe7cf758feac94053e4baf7338bb592c8b7c291667fadf4a9bf9a2a154a18f612cbc7f851b3f8f2070e0a9d180622ee4f8e81b0ab250d504cef24116a3ff188cc829fcd8610b56343569e8dc997629410d1967ca9dd1d27eec5e01e4375aad16c46faba268524b154850d0d6fe3a76af2c6aa3e97647c51036049ac565370028d6a439a2672b6face56e1b171496c0722cfa22d9da631be359661617c5d5a2d286c5e19db9452c1e21a0107b6400debda2decb0c838f342dd017cdb2dccdf1fe97e3df3f881856b546997a3fed9e279c720145101567dd56be21688fed66bf9759e432a9aa89cbbd225d13cdea4ca05f7a45cfb6a682a3d5b1e18f7e6cf934fae5098108bae9058d05c3387a01d8d02a656d2bfff67e9f46b2d8a6aac28129e52efddf6e552214c3f8a45bc7a912cca9a7fec1d7d06412c6972cb9e3dc518983f56530b8bffe7f92c4b6eb47d4aef59fb513c4653a42de61bc17ad7728e7fc7590ff05a9e991de03f023d0aaf8688ed6170def5091c66576a424ac1cb").unwrap());
        let sender_dummy_node_id = PublicKey::from_slice(&[2; 33]).unwrap();
-       nodes[0].handle_onion_message(&sender_dummy_node_id, &sender_to_alice_om);
-       let alice_to_bob_om = nodes[0].next_onion_message_for_peer(
-               keys_mgrs[1].1.get_node_id(Recipient::Node).unwrap()).unwrap();
+       nodes[0].messenger.handle_onion_message(&sender_dummy_node_id, &sender_to_alice_om);
+       let alice_to_bob_om = nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).unwrap();
        assert_eq!(alice_to_bob_om.encode(), <Vec<u8>>::from_hex("031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f05560002536d53f93796cad550b6c68662dca41f7e8c221c31022c64dd1a627b2df3982b25eac261e88369cfc66e1e3b6d9829cb3dcd707046e68a7796065202a7904811bf2608c5611cf74c9eb5371c7eb1a4428bb39a041493e2a568ddb0b2482a6cc6711bc6116cef144ebf988073cb18d9dd4ce2d3aa9de91a7dc6d7c6f11a852024626e66b41ba1158055505dff9cb15aa51099f315564d9ee3ed6349665dc3e209eedf9b5805ee4f69d315df44c80e63d0e2efbdab60ec96f44a3447c6a6ddb1efb6aa4e072bde1dab974081646bfddf3b02daa2b83847d74dd336465e76e9b8fecc2b0414045eeedfc39939088a76820177dd1103c99939e659beb07197bab9f714b30ba8dc83738e9a6553a57888aaeda156c68933a2f4ff35e3f81135076b944ed9856acbfee9c61299a5d1763eadd14bf5eaf71304c8e165e590d7ecbcd25f1650bf5b6c2ad1823b2dc9145e168974ecf6a2273c94decff76d94bc6708007a17f22262d63033c184d0166c14f41b225a956271947aae6ce65890ed8f0d09c6ffe05ec02ee8b9de69d7077a0c5adeb813aabcc1ba8975b73ab06ddea5f4db3c23a1de831602de2b83f990d4133871a1a81e53f86393e6a7c3a7b73f0c099fa72afe26c3027bb9412338a19303bd6e6591c04fb4cde9b832b5f41ae199301ea8c303b5cef3aca599454273565de40e1148156d1f97c1aa9e58459ab318304075e034f5b7899c12587b86776a18a1da96b7bcdc22864fccc4c41538ebce92a6f054d53bf46770273a70e75fe0155cd6d2f2e937465b0825ce3123b8c206fac4c30478fa0f08a97ade7216dce11626401374993213636e93545a31f500562130f2feb04089661ad8c34d5a4cbd2e4e426f37cb094c786198a220a2646ecadc38c04c29ee67b19d662c209a7b30bfecc7fe8bf7d274de0605ee5df4db490f6d32234f6af639d3fce38a2801bcf8d51e9c090a6c6932355a83848129a378095b34e71cb8f51152dc035a4fe8e802fec8de221a02ba5afd6765ce570bef912f87357936ea0b90cb2990f56035e89539ec66e8dbd6ed50835158614096990e019c3eba3d7dd6a77147641c6145e8b17552cd5cf7cd163dd40b9eaeba8c78e03a2cd8c0b7997d6f56d35f38983a202b4eb8a54e14945c4de1a6dde46167e11708b7a5ff5cb9c0f7fc12fae49a012aa90bb1995c038130b749c48e6f1ffb732e92086def42af10fbc460d94abeb7b2fa744a5e9a491d62a08452be8cf2fdef573deedc1fe97098bce889f98200b26f9bb99da9aceddda6d793d8e0e44a2601ef4590cfbb5c3d0197aac691e3d31c20fd8e38764962ca34dabeb85df28feabaf6255d4d0df3d814455186a84423182caa87f9673df770432ad8fdfe78d4888632d460d36d2719e8fa8e4b4ca10d817c5d6bc44a8b2affab8c2ba53b8bf4994d63286c2fad6be04c28661162fa1a67065ecda8ba8c13aee4a8039f4f0110e0c0da2366f178d8903e19136dad6df9d8693ce71f3a270f9941de2a93d9b67bc516207ac1687bf6e00b29723c42c7d9c90df9d5e599dbeb7b73add0a6a2b7aba82f98ac93cb6e60494040445229f983a81c34f7f686d166dfc98ec23a6318d4a02a311ac28d655ea4e0f9c3014984f31e621ef003e98c373561d9040893feece2e0fa6cd2dd565e6fbb2773a2407cb2c3273c306cf71f427f2e551c4092e067cf9869f31ac7c6c80dd52d4f85be57a891a41e34be0d564e39b4af6f46b85339254a58b205fb7e10e7d0470ee73622493f28c08962118c23a1198467e72c4ae1cd482144b419247a5895975ea90d135e2a46ef7e5794a1551a447ff0a0d299b66a7f565cd86531f5e7af5408d85d877ce95b1df12b88b7d5954903a5296325ba478ba1e1a9d1f30a2d5052b2e2889bbd64f72c72bc71d8817288a2").unwrap());
-       nodes[1].handle_onion_message(
-               &keys_mgrs[0].1.get_node_id(Recipient::Node).unwrap(), &alice_to_bob_om);
-       let bob_to_carol_om = nodes[1].next_onion_message_for_peer(
-               keys_mgrs[2].1.get_node_id(Recipient::Node).unwrap()).unwrap();
+       nodes[1].messenger.handle_onion_message(&nodes[0].node_id, &alice_to_bob_om);
+       let bob_to_carol_om = nodes[1].messenger.next_onion_message_for_peer(nodes[2].node_id).unwrap();
        assert_eq!(bob_to_carol_om.encode(), <Vec<u8>>::from_hex("02b684babfd400c8dd48b367e9754b8021a3594a34dc94d7101776c7f6a86d0582055600029a77e8523162efa1f4208f4f2050cd5c386ddb6ce6d36235ea569d217ec52209fb85fdf7dbc4786c373eebdba0ddc184cfbe6da624f610e93f62c70f2c56be1090b926359969f040f932c03f53974db5656233bd60af375517d4323002937d784c2c88a564bcefe5c33d3fc21c26d94dfacab85e2e19685fd2ff4c543650958524439b6da68779459aee5ffc9dc543339acec73ff43be4c44ddcbe1c11d50e2411a67056ba9db7939d780f5a86123fdd3abd6f075f7a1d78ab7daf3a82798b7ec1e9f1345bc0d1e935098497067e2ae5a51ece396fcb3bb30871ad73aee51b2418b39f00c8e8e22be4a24f4b624e09cb0414dd46239de31c7be035f71e8da4f5a94d15b44061f46414d3f355069b5c5b874ba56704eb126148a22ec873407fe118972127e63ff80e682e410f297f23841777cec0517e933eaf49d7e34bd203266b42081b3a5193b51ccd34b41342bc67cf73523b741f5c012ba2572e9dda15fbe131a6ac2ff24dc2a7622d58b9f3553092cfae7fae3c8864d95f97aa49ec8edeff5d9f5782471160ee412d82ff6767030fc63eec6a93219a108cd41433834b26676a39846a944998796c79cd1cc460531b8ded659cedfd8aecefd91944f00476f1496daafb4ea6af3feacac1390ea510709783c2aa81a29de27f8959f6284f4684102b17815667cbb0645396ac7d542b878d90c42a1f7f00c4c4eedb2a22a219f38afadb4f1f562b6e000a94e75cc38f535b43a3c0384ccef127fde254a9033a317701c710b2b881065723486e3f4d3eea5e12f374a41565fe43fa137c1a252c2153dde055bb343344c65ad0529010ece29bbd405effbebfe3ba21382b94a60ac1a5ffa03f521792a67b30773cb42e862a8a02a8bbd41b842e115969c87d1ff1f8c7b5726b9f20772dd57fe6e4ea41f959a2a673ffad8e2f2a472c4c8564f3a5a47568dd75294b1c7180c500f7392a7da231b1fe9e525ea2d7251afe9ca52a17fe54a116cb57baca4f55b9b6de915924d644cba9dade4ccc01939d7935749c008bafc6d3ad01cd72341ce5ddf7a5d7d21cf0465ab7a3233433aef21f9acf2bfcdc5a8cc003adc4d82ac9d72b36eb74e05c9aa6ccf439ac92e6b84a3191f0764dd2a2e0b4cc3baa08782b232ad6ecd3ca6029bc08cc094aef3aebddcaddc30070cb6023a689641de86cfc6341c8817215a4650f844cd2ca60f2f10c6e44cfc5f23912684d4457bf4f599879d30b79bf12ef1ab8d34dddc15672b82e56169d4c770f0a2a7a960b1e8790773f5ff7fce92219808f16d061cc85e053971213676d28fb48925e9232b66533dbd938458eb2cc8358159df7a2a2e4cf87500ede2afb8ce963a845b98978edf26a6948d4932a6b95d022004556d25515fe158092ce9a913b4b4a493281393ca731e8d8e5a3449b9d888fc4e73ffcbb9c6d6d66e88e03cf6e81a0496ede6e4e4172b08c000601993af38f80c7f68c9d5fff9e0e215cff088285bf039ca731744efcb7825a272ca724517736b4890f47e306b200aa2543c363e2c9090bcf3cf56b5b86868a62471c7123a41740392fc1d5ab28da18dca66618e9af7b42b62b23aba907779e73ca03ec60e6ab9e0484b9cae6578e0fddb6386cb3468506bf6420298bf4a690947ab582255551d82487f271101c72e19e54872ab47eae144db66bc2f8194a666a5daec08d12822cb83a61946234f2dfdbd6ca7d8763e6818adee7b401fcdb1ac42f9df1ac5cc5ac131f2869013c8d6cd29d4c4e3d05bccd34ca83366d616296acf854fa05149bfd763a25b9938e96826a037fdcb85545439c76df6beed3bdbd01458f9cf984997cc4f0a7ac3cc3f5e1eeb59c09cadcf5a537f16e444149c8f17d4bdaef16c9fbabc5ef06eb0f0bf3a07a1beddfeacdaf1df5582d6dbd6bb808d6ab31bc22e5d7").unwrap());
-       nodes[2].handle_onion_message(
-               &keys_mgrs[1].1.get_node_id(Recipient::Node).unwrap(), &bob_to_carol_om);
-       let carol_to_dave_om = nodes[2].next_onion_message_for_peer(
-               keys_mgrs[3].1.get_node_id(Recipient::Node).unwrap()).unwrap();
+       nodes[2].messenger.handle_onion_message(&nodes[1].node_id, &bob_to_carol_om);
+       let carol_to_dave_om = nodes[2].messenger.next_onion_message_for_peer(nodes[3].node_id).unwrap();
        assert_eq!(carol_to_dave_om.encode(), <Vec<u8>>::from_hex("025aaca62db7ce6b46386206ef9930daa32e979a35cb185a41cb951aa7d254b03c055600025550b2910294fa73bda99b9de9c851be9cbb481e23194a1743033630efba546b86e7d838d0f6e9cc0ed088dbf6889f0dceca3bfc745bd77d013a31311fa932a8bf1d28387d9ff521eabc651dee8f861fed609a68551145a451f017ec44978addeee97a423c08445531da488fd1ddc998e9cdbfcea59517b53fbf1833f0bbe6188dba6ca773a247220ec934010daca9cc185e1ceb136803469baac799e27a0d82abe53dc48a06a55d1f643885cc7894677dd20a4e4152577d1ba74b870b9279f065f9b340cedb3ca13b7df218e853e10ccd1b59c42a2acf93f489e170ee4373d30ab158b60fc20d3ba73a1f8c750951d69fb5b9321b968ddc8114936412346aff802df65516e1c09c51ef19849ff36c0199fd88c8bec301a30fef0c7cb497901c038611303f64e4174b5daf42832aa5586b84d2c9b95f382f4269a5d1bd4be898618dc78dfd451170f72ca16decac5b03e60702112e439cadd104fb3bbb3d5023c9b80823fdcd0a212a7e1aaa6eeb027adc7f8b3723031d135a09a979a4802788bb7861c6cc85501fb91137768b70aeab309b27b885686604ffc387004ac4f8c44b101c39bc0597ef7fd957f53fc5051f534b10eb3852100962b5e58254e5558689913c26ad6072ea41f5c5db10077cfc91101d4ae393be274c74297da5cc381cd88d54753aaa7df74b2f9da8d88a72bc9218fcd1f19e4ff4aace182312b9509c5175b6988f044c5756d232af02a451a02ca752f3c52747773acff6fd07d2032e6ce562a2c42105d106eba02d0b1904182cdc8c74875b082d4989d3a7e9f0e73de7c75d357f4af976c28c0b206c5e8123fc2391d078592d0d5ff686fd245c0a2de2e535b7cca99c0a37d432a8657393a9e3ca53eec1692159046ba52cb9bc97107349d8673f74cbc97e231f1108005c8d03e24ca813cea2294b39a7a493bcc062708f1f6cf0074e387e7d50e0666ce784ef4d31cb860f6cad767438d9ea5156ff0ae86e029e0247bf94df75ee0cda4f2006061455cb2eaff513d558863ae334cef7a3d45f55e7cc13153c6719e9901c1d4db6c03f643b69ea4860690305651794284d9e61eb848ccdf5a77794d376f0af62e46d4835acce6fd9eef5df73ebb8ea3bb48629766967f446e744ecc57ff3642c4aa1ccee9a2f72d5caa75fa05787d08b79408fce792485fdecdc25df34820fb061275d70b84ece540b0fc47b2453612be34f2b78133a64e812598fbe225fd85415f8ffe5340ce955b5fd9d67dd88c1c531dde298ed25f96df271558c812c26fa386966c76f03a6ebccbca49ac955916929bd42e134f982dde03f924c464be5fd1ba44f8dc4c3cbc8162755fd1d8f7dc044b15b1a796c53df7d8769bb167b2045b49cc71e08908796c92c16a235717cabc4bb9f60f8f66ff4fff1f9836388a99583acebdff4a7fb20f48eedcd1f4bdcc06ec8b48e35307df51d9bc81d38a94992dd135b30079e1f592da6e98dff496cb1a7776460a26b06395b176f585636ebdf7eab692b227a31d6979f5a6141292698e91346b6c806b90c7c6971e481559cae92ee8f4136f2226861f5c39ddd29bbdb118a35dece03f49a96804caea79a3dacfbf09d65f2611b5622de51d98e18151acb3bb84c09caaa0cc80edfa743a4679f37d6167618ce99e73362fa6f213409931762618a61f1738c071bba5afc1db24fe94afb70c40d731908ab9a505f76f57a7d40e708fd3df0efc5b7cbb2a7b75cd23449e09684a2f0e2bfa0d6176c35f96fe94d92fc9fa4103972781f81cb6e8df7dbeb0fc529c600d768bed3f08828b773d284f69e9a203459d88c12d6df7a75be2455fec128f07a497a2b2bf626cc6272d0419ca663e9dc66b8224227eb796f0246dcae9c5b0b6cfdbbd40c3245a610481c92047c968c9fc92c04b89cc41a0c15355a8f").unwrap());
        // Dave handles the onion message but he'll log that he errored while decoding the hop data
        // because he sees it as an empty onion message (the only contents of the sender's OM is "hello"
        // with TLV type 1, which Dave ignores because (1) it's odd and he can't understand it and (2) LDK
        // only attempts to parse custom OM TLVs with type > 64).
-       nodes[3].handle_onion_message(
-               &keys_mgrs[2].1.get_node_id(Recipient::Node).unwrap(), &carol_to_dave_om);
+       nodes[3].messenger.handle_onion_message(&nodes[2].node_id, &carol_to_dave_om);
 }
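
The relay above repeats the same two calls per hop; a condensed sketch of that flow, assuming the test's node struct (here called `MessengerNode`) exposes the `messenger` and `node_id` fields used in the hunk:

```rust
// Sketch: peel one onion layer per hop and hand the re-wrapped message to the
// next node, mirroring the sender -> Alice -> Bob -> Carol -> Dave flow above.
fn relay(nodes: &[MessengerNode], sender_id: PublicKey, first_om: OnionMessage) {
	let mut prev_id = sender_id;
	let mut om = first_om;
	for (i, node) in nodes.iter().enumerate() {
		// Peel one layer of the onion at this hop.
		node.messenger.handle_onion_message(&prev_id, &om);
		prev_id = node.node_id;
		// Fetch the re-wrapped message buffered for the next hop, if any.
		match nodes.get(i + 1) {
			Some(next) => om = node.messenger.next_onion_message_for_peer(next.node_id).unwrap(),
			None => break,
		}
	}
}
```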
index c7f01ae597862713ab1e3bde1d1f4c8b8f4b4a0d..21a1b302da09b76fa3f73cbc70953fa46919c3c6 100644 (file)
@@ -18,13 +18,15 @@ use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
 use crate::blinded_path::BlindedPath;
 use crate::blinded_path::message::{advance_path_by_one, ForwardTlvs, ReceiveTlvs};
 use crate::blinded_path::utils;
+use crate::events::{Event, EventHandler, EventsProvider};
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient};
 #[cfg(not(c_bindings))]
 use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use crate::ln::features::{InitFeatures, NodeFeatures};
-use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler};
+use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress};
 use crate::ln::onion_utils;
 use crate::ln::peer_handler::IgnoringMessageHandler;
+use crate::routing::gossip::{NetworkGraph, NodeId};
 pub use super::packet::OnionMessageContents;
 use super::packet::ParsedOnionMessageContents;
 use super::offers::OffersMessageHandler;
@@ -38,6 +40,8 @@ use crate::io;
 use crate::sync::{Arc, Mutex};
 use crate::prelude::*;
 
+pub(super) const MAX_TIMER_TICKS: usize = 2;
+
 /// A sender, receiver and forwarder of [`OnionMessage`]s.
 ///
 /// # Handling Messages
@@ -76,7 +80,15 @@ use crate::prelude::*;
 /// # struct FakeMessageRouter {}
 /// # impl MessageRouter for FakeMessageRouter {
 /// #     fn find_path(&self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination) -> Result<OnionMessagePath, ()> {
-/// #         unimplemented!()
+/// #         let secp_ctx = Secp256k1::new();
+/// #         let node_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+/// #         let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
+/// #         let hop_node_id2 = hop_node_id1;
+/// #         Ok(OnionMessagePath {
+/// #             intermediate_nodes: vec![hop_node_id1, hop_node_id2],
+/// #             destination,
+/// #             first_node_addresses: None,
+/// #         })
 /// #     }
 /// # }
 /// # let seed = [42u8; 32];
@@ -86,7 +98,7 @@ use crate::prelude::*;
 /// # let node_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
 /// # let secp_ctx = Secp256k1::new();
 /// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
-/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1, hop_node_id1);
+/// # let (hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1);
 /// # let destination_node_id = hop_node_id1;
 /// # let message_router = Arc::new(FakeMessageRouter {});
 /// # let custom_message_handler = IgnoringMessageHandler {};
@@ -113,13 +125,10 @@ use crate::prelude::*;
 ///    }
 /// }
 /// // Send a custom onion message to a node id.
-/// let path = OnionMessagePath {
-///    intermediate_nodes: vec![hop_node_id1, hop_node_id2],
-///    destination: Destination::Node(destination_node_id),
-/// };
+/// let destination = Destination::Node(destination_node_id);
 /// let reply_path = None;
 /// # let message = YourCustomMessage {};
-/// onion_messenger.send_onion_message(path, message, reply_path);
+/// onion_messenger.send_onion_message(message, destination, reply_path);
 ///
 /// // Create a blinded path to yourself, for someone to send an onion message to.
 /// # let your_node_id = hop_node_id1;
@@ -127,13 +136,10 @@ use crate::prelude::*;
 /// let blinded_path = BlindedPath::new_for_message(&hops, &keys_manager, &secp_ctx).unwrap();
 ///
 /// // Send a custom onion message to a blinded path.
-/// let path = OnionMessagePath {
-///    intermediate_nodes: vec![hop_node_id1, hop_node_id2],
-///    destination: Destination::BlindedPath(blinded_path),
-/// };
+/// let destination = Destination::BlindedPath(blinded_path);
 /// let reply_path = None;
 /// # let message = YourCustomMessage {};
-/// onion_messenger.send_onion_message(path, message, reply_path);
+/// onion_messenger.send_onion_message(message, destination, reply_path);
 /// ```
 ///
 /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
@@ -145,18 +151,87 @@ where
        L::Target: Logger,
        MR::Target: MessageRouter,
        OMH::Target: OffersMessageHandler,
-       CMH:: Target: CustomOnionMessageHandler,
+       CMH::Target: CustomOnionMessageHandler,
 {
        entropy_source: ES,
        node_signer: NS,
        logger: L,
-       pending_messages: Mutex<HashMap<PublicKey, VecDeque<OnionMessage>>>,
+       message_recipients: Mutex<HashMap<PublicKey, OnionMessageRecipient>>,
        secp_ctx: Secp256k1<secp256k1::All>,
        message_router: MR,
        offers_handler: OMH,
        custom_handler: CMH,
 }
 
+/// [`OnionMessage`]s buffered to be sent.
+enum OnionMessageRecipient {
+       /// Messages for a node connected as a peer.
+       ConnectedPeer(VecDeque<OnionMessage>),
+
+       /// Messages for a node that is not yet connected, which are dropped after [`MAX_TIMER_TICKS`]
+       /// timer ticks; the elapsed tick count is tracked here.
+       PendingConnection(VecDeque<OnionMessage>, Option<Vec<SocketAddress>>, usize),
+}
+
+impl OnionMessageRecipient {
+       fn pending_connection(addresses: Vec<SocketAddress>) -> Self {
+               Self::PendingConnection(VecDeque::new(), Some(addresses), 0)
+       }
+
+       fn pending_messages(&self) -> &VecDeque<OnionMessage> {
+               match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => pending_messages,
+               }
+       }
+
+       fn enqueue_message(&mut self, message: OnionMessage) {
+               let pending_messages = match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => pending_messages,
+               };
+
+               pending_messages.push_back(message);
+       }
+
+       fn dequeue_message(&mut self) -> Option<OnionMessage> {
+               let pending_messages = match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => {
+                               debug_assert!(false);
+                               pending_messages
+                       },
+               };
+
+               pending_messages.pop_front()
+       }
+
+       #[cfg(test)]
+       fn release_pending_messages(&mut self) -> VecDeque<OnionMessage> {
+               let pending_messages = match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => pending_messages,
+               };
+
+               core::mem::take(pending_messages)
+       }
+
+       fn mark_connected(&mut self) {
+               if let OnionMessageRecipient::PendingConnection(pending_messages, _, _) = self {
+                       let mut new_pending_messages = VecDeque::new();
+                       core::mem::swap(pending_messages, &mut new_pending_messages);
+                       *self = OnionMessageRecipient::ConnectedPeer(new_pending_messages);
+               }
+       }
+
+       fn is_connected(&self) -> bool {
+               match self {
+                       OnionMessageRecipient::ConnectedPeer(..) => true,
+                       OnionMessageRecipient::PendingConnection(..) => false,
+               }
+       }
+}
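+
+// The enum above is internal, but its intended lifecycle is easiest to see end to end.
+// A rough sketch (`address`, `onion_message`, and `deliver` are placeholders):
+//
+// ```rust
+// // A recipient starts pending, buffers messages, and is promoted to a
+// // connected peer, at which point the buffered messages become deliverable.
+// let mut recipient = OnionMessageRecipient::pending_connection(vec![address]);
+// recipient.enqueue_message(onion_message);
+// assert!(!recipient.is_connected());
+//
+// // peer_connected calls mark_connected, carrying the buffer over.
+// recipient.mark_connected();
+// assert!(recipient.is_connected());
+//
+// // next_onion_message_for_peer drains the buffer one message at a time.
+// while let Some(message) = recipient.dequeue_message() {
+//     deliver(message);
+// }
+// ```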
+
 /// An [`OnionMessage`] for [`OnionMessenger`] to send.
 ///
 /// These are obtained when released from [`OnionMessenger`]'s handlers after which they are
@@ -198,16 +273,52 @@ pub trait MessageRouter {
 }
 
 /// A [`MessageRouter`] that can only route to a directly connected [`Destination`].
-pub struct DefaultMessageRouter;
+pub struct DefaultMessageRouter<G: Deref<Target=NetworkGraph<L>>, L: Deref>
+where
+       L::Target: Logger,
+{
+       network_graph: G,
+}
 
-impl MessageRouter for DefaultMessageRouter {
+impl<G: Deref<Target=NetworkGraph<L>>, L: Deref> DefaultMessageRouter<G, L>
+where
+       L::Target: Logger,
+{
+       /// Creates a [`DefaultMessageRouter`] using the given [`NetworkGraph`].
+       pub fn new(network_graph: G) -> Self {
+               Self { network_graph }
+       }
+}
+
+impl<G: Deref<Target=NetworkGraph<L>>, L: Deref> MessageRouter for DefaultMessageRouter<G, L>
+where
+       L::Target: Logger,
+{
        fn find_path(
                &self, _sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
        ) -> Result<OnionMessagePath, ()> {
-               if peers.contains(&destination.first_node()) {
-                       Ok(OnionMessagePath { intermediate_nodes: vec![], destination })
+               let first_node = destination.first_node();
+               if peers.contains(&first_node) {
+                       Ok(OnionMessagePath {
+                               intermediate_nodes: vec![], destination, first_node_addresses: None
+                       })
                } else {
-                       Err(())
+                       let network_graph = self.network_graph.deref().read_only();
+                       let node_announcement = network_graph
+                               .node(&NodeId::from_pubkey(&first_node))
+                               .and_then(|node_info| node_info.announcement_info.as_ref())
+                               .and_then(|announcement_info| announcement_info.announcement_message.as_ref())
+                               .map(|node_announcement| &node_announcement.contents);
+
+                       match node_announcement {
+                               Some(node_announcement) if node_announcement.features.supports_onion_messages() => {
+                                       let first_node_addresses = Some(node_announcement.addresses.clone());
+                                       Ok(OnionMessagePath {
+                                               intermediate_nodes: vec![], destination, first_node_addresses
+                                       })
+                               },
+                               _ => Err(()),
+                       }
                }
        }
 }
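
`DefaultMessageRouter` is now parameterized over a `NetworkGraph`, so construction takes the graph handle. A minimal sketch, assuming `network` and `logger` values are already in scope:

```rust
use std::sync::Arc;
use lightning::onion_message::messenger::DefaultMessageRouter;
use lightning::routing::gossip::NetworkGraph;

// Sketch: when the destination's first node is not a connected peer, the
// router falls back to the graph's announced addresses, provided the node
// advertises onion-message support.
let network_graph = Arc::new(NetworkGraph::new(network, Arc::clone(&logger)));
let message_router = DefaultMessageRouter::new(Arc::clone(&network_graph));
```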
@@ -220,6 +331,22 @@ pub struct OnionMessagePath {
 
        /// The recipient of the message.
        pub destination: Destination,
+
+       /// Addresses that may be used to connect to [`OnionMessagePath::first_node`].
+       ///
+       /// Only needs to be set if a connection to the node is required. [`OnionMessenger`] may use
+       /// this to initiate such a connection.
+       pub first_node_addresses: Option<Vec<SocketAddress>>,
+}
+
+impl OnionMessagePath {
+       /// Returns the first node in the path.
+       pub fn first_node(&self) -> PublicKey {
+               self.intermediate_nodes
+                       .first()
+                       .copied()
+                       .unwrap_or_else(|| self.destination.first_node())
+       }
 }
 
 /// The destination of an onion message.
@@ -247,6 +374,19 @@ impl Destination {
        }
 }
 
+/// Result of successfully [sending an onion message].
+///
+/// [sending an onion message]: OnionMessenger::send_onion_message
+#[derive(Debug, PartialEq, Eq)]
+pub enum SendSuccess {
+       /// The message was buffered and will be sent once it is processed by
+       /// [`OnionMessageHandler::next_onion_message_for_peer`].
+       Buffered,
+       /// The message was buffered and will be sent once the node is connected as a peer and the
+       /// message is processed by [`OnionMessageHandler::next_onion_message_for_peer`].
+       BufferedAwaitingConnection(PublicKey),
+}
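
Callers can branch on which buffered outcome occurred. A usage sketch (`message` and `destination` are placeholders for values constructed elsewhere):

```rust
// Sketch: distinguish "ready to send" from "waiting on a connection".
match onion_messenger.send_onion_message(message, destination, None) {
	Ok(SendSuccess::Buffered) => {
		// The first hop is already a connected peer.
	},
	Ok(SendSuccess::BufferedAwaitingConnection(node_id)) => {
		// Expect an Event::ConnectionNeeded for node_id when processing events.
	},
	Err(e) => panic!("failed to send onion message: {:?}", e),
}
```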
+
 /// Errors that may occur when [sending an onion message].
 ///
 /// [sending an onion message]: OnionMessenger::send_onion_message
@@ -260,8 +400,10 @@ pub enum SendError {
        /// The provided [`Destination`] was an invalid [`BlindedPath`] due to not having any blinded
        /// hops.
        TooFewBlindedHops,
-       /// Our next-hop peer was offline or does not support onion message forwarding.
-       InvalidFirstHop,
+       /// The first hop is not a peer and doesn't have a known [`SocketAddress`].
+       InvalidFirstHop(PublicKey),
+       /// A path from the sender to the destination could not be found by the [`MessageRouter`].
+       PathNotFound,
        /// Onion message contents must have a TLV type >= 64.
        InvalidMessage,
        /// Our next-hop peer's buffer was full or our total outbound buffer was full.
@@ -328,16 +470,17 @@ pub enum PeeledOnion<T: OnionMessageContents> {
 /// Creates an [`OnionMessage`] with the given `contents` for sending to the destination of
 /// `path`.
 ///
-/// Returns both the node id of the peer to send the message to and the message itself.
+/// Returns the node id of the peer to send the message to, the message itself, and any addresses
+/// needed to connect to the first node.
 pub fn create_onion_message<ES: Deref, NS: Deref, T: OnionMessageContents>(
        entropy_source: &ES, node_signer: &NS, secp_ctx: &Secp256k1<secp256k1::All>,
        path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>,
-) -> Result<(PublicKey, OnionMessage), SendError>
+) -> Result<(PublicKey, OnionMessage, Option<Vec<SocketAddress>>), SendError>
 where
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
 {
-       let OnionMessagePath { intermediate_nodes, mut destination } = path;
+       let OnionMessagePath { intermediate_nodes, mut destination, first_node_addresses } = path;
        if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
                if blinded_hops.is_empty() {
                        return Err(SendError::TooFewBlindedHops);
@@ -378,10 +521,8 @@ where
        let onion_routing_packet = construct_onion_message_packet(
                packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
 
-       Ok((first_node_id, OnionMessage {
-               blinding_point,
-               onion_routing_packet
-       }))
+       let message = OnionMessage { blinding_point, onion_routing_packet };
+       Ok((first_node_id, message, first_node_addresses))
 }
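
Standalone callers of `create_onion_message` now destructure a three-element tuple. A sketch (inputs are placeholders):

```rust
// Sketch: the third element is Some(..) only when the path carried addresses
// for a first node that may need an outbound connection.
let (first_node_id, onion_message, addresses) = create_onion_message(
	&entropy_source, &node_signer, &secp_ctx, path, contents, reply_path,
)?;
if let Some(addresses) = addresses {
	// Connect to first_node_id at one of `addresses` before delivery.
}
```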
 
 /// Decode one layer of an incoming [`OnionMessage`].
@@ -502,7 +643,7 @@ where
                OnionMessenger {
                        entropy_source,
                        node_signer,
-                       pending_messages: Mutex::new(HashMap::new()),
+                       message_recipients: Mutex::new(HashMap::new()),
                        secp_ctx,
                        logger,
                        message_router,
@@ -511,38 +652,113 @@ where
                }
        }
 
-       /// Sends an [`OnionMessage`] with the given `contents` for sending to the destination of
-       /// `path`.
+       /// Sends an [`OnionMessage`] with the given `contents` to `destination`.
        ///
        /// See [`OnionMessenger`] for example usage.
        pub fn send_onion_message<T: OnionMessageContents>(
-               &self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>
-       ) -> Result<(), SendError> {
+               &self, contents: T, destination: Destination, reply_path: Option<BlindedPath>
+       ) -> Result<SendSuccess, SendError> {
+               self.find_path_and_enqueue_onion_message(
+                       contents, destination, reply_path, format_args!("")
+               )
+       }
+
+       fn find_path_and_enqueue_onion_message<T: OnionMessageContents>(
+               &self, contents: T, destination: Destination, reply_path: Option<BlindedPath>,
+               log_suffix: fmt::Arguments
+       ) -> Result<SendSuccess, SendError> {
+               let result = self.find_path(destination)
+                       .and_then(|path| self.enqueue_onion_message(path, contents, reply_path, log_suffix));
+
+               match result.as_ref() {
+                       Err(SendError::GetNodeIdFailed) => {
+                               log_warn!(self.logger, "Unable to retrieve node id {}", log_suffix);
+                       },
+                       Err(SendError::PathNotFound) => {
+                               log_trace!(self.logger, "Failed to find path {}", log_suffix);
+                       },
+                       Err(e) => {
+                               log_trace!(self.logger, "Failed sending onion message {}: {:?}", log_suffix, e);
+                       },
+                       Ok(SendSuccess::Buffered) => {
+                               log_trace!(self.logger, "Buffered onion message {}", log_suffix);
+                       },
+                       Ok(SendSuccess::BufferedAwaitingConnection(node_id)) => {
+                               log_trace!(
+                                       self.logger, "Buffered onion message waiting on peer connection {}: {:?}",
+                                       log_suffix, node_id
+                               );
+                       },
+               }
+
+               result
+       }
+
+       fn find_path(&self, destination: Destination) -> Result<OnionMessagePath, SendError> {
+               let sender = self.node_signer
+                       .get_node_id(Recipient::Node)
+                       .map_err(|_| SendError::GetNodeIdFailed)?;
 
-               log_trace!(self.logger, "Sending onion message: {:?}", contents);
-               
-               let (first_node_id, onion_msg) = create_onion_message(
+               let peers = self.message_recipients.lock().unwrap()
+                       .iter()
+                       .filter(|(_, recipient)| matches!(recipient, OnionMessageRecipient::ConnectedPeer(_)))
+                       .map(|(node_id, _)| *node_id)
+                       .collect();
+
+               self.message_router
+                       .find_path(sender, peers, destination)
+                       .map_err(|_| SendError::PathNotFound)
+       }
+
+       fn enqueue_onion_message<T: OnionMessageContents>(
+               &self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>,
+               log_suffix: fmt::Arguments
+       ) -> Result<SendSuccess, SendError> {
+               log_trace!(self.logger, "Constructing onion message {}: {:?}", log_suffix, contents);
+
+               let (first_node_id, onion_message, addresses) = create_onion_message(
                        &self.entropy_source, &self.node_signer, &self.secp_ctx, path, contents, reply_path
                )?;
 
-               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
-               if outbound_buffer_full(&first_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
-               match pending_per_peer_msgs.entry(first_node_id) {
-                       hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
+               let mut message_recipients = self.message_recipients.lock().unwrap();
+               if outbound_buffer_full(&first_node_id, &message_recipients) {
+                       return Err(SendError::BufferFull);
+               }
+
+               match message_recipients.entry(first_node_id) {
+                       hash_map::Entry::Vacant(e) => match addresses {
+                               None => Err(SendError::InvalidFirstHop(first_node_id)),
+                               Some(addresses) => {
+                                       e.insert(OnionMessageRecipient::pending_connection(addresses))
+                                               .enqueue_message(onion_message);
+                                       Ok(SendSuccess::BufferedAwaitingConnection(first_node_id))
+                               },
+                       },
                        hash_map::Entry::Occupied(mut e) => {
-                               e.get_mut().push_back(onion_msg);
-                               Ok(())
-                       }
+                               e.get_mut().enqueue_message(onion_message);
+                               if e.get().is_connected() {
+                                       Ok(SendSuccess::Buffered)
+                               } else {
+                                       Ok(SendSuccess::BufferedAwaitingConnection(first_node_id))
+                               }
+                       },
                }
        }
 
+       #[cfg(test)]
+       pub(super) fn send_onion_message_using_path<T: OnionMessageContents>(
+               &self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>
+       ) -> Result<SendSuccess, SendError> {
+               self.enqueue_onion_message(path, contents, reply_path, format_args!(""))
+       }
+
        fn handle_onion_message_response<T: OnionMessageContents>(
                &self, response: Option<T>, reply_path: Option<BlindedPath>, log_suffix: fmt::Arguments
        ) {
                if let Some(response) = response {
                        match reply_path {
                                Some(reply_path) => {
-                                       self.find_path_and_enqueue_onion_message(
+                                       let _ = self.find_path_and_enqueue_onion_message(
                                                response, Destination::BlindedPath(reply_path), None, log_suffix
                                        );
                                },
@@ -553,55 +769,26 @@ where
                }
        }
 
-       fn find_path_and_enqueue_onion_message<T: OnionMessageContents>(
-               &self, contents: T, destination: Destination, reply_path: Option<BlindedPath>,
-               log_suffix: fmt::Arguments
-       ) {
-               let sender = match self.node_signer.get_node_id(Recipient::Node) {
-                       Ok(node_id) => node_id,
-                       Err(_) => {
-                               log_warn!(self.logger, "Unable to retrieve node id {}", log_suffix);
-                               return;
-                       }
-               };
-
-               let peers = self.pending_messages.lock().unwrap().keys().copied().collect();
-               let path = match self.message_router.find_path(sender, peers, destination) {
-                       Ok(path) => path,
-                       Err(()) => {
-                               log_trace!(self.logger, "Failed to find path {}", log_suffix);
-                               return;
-                       },
-               };
-
-               log_trace!(self.logger, "Sending onion message {}: {:?}", log_suffix, contents);
-
-               if let Err(e) = self.send_onion_message(path, contents, reply_path) {
-                       log_trace!(self.logger, "Failed sending onion message {}: {:?}", log_suffix, e);
-                       return;
-               }
-       }
-
        #[cfg(test)]
        pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<OnionMessage>> {
-               let mut pending_msgs = self.pending_messages.lock().unwrap();
+               let mut message_recipients = self.message_recipients.lock().unwrap();
                let mut msgs = HashMap::new();
                // We don't want to disconnect the peers by removing them entirely from the original map, so we
-               // swap the pending message buffers individually.
-               for (peer_node_id, pending_messages) in &mut *pending_msgs {
-                       msgs.insert(*peer_node_id, core::mem::take(pending_messages));
+               // release the pending message buffers individually.
+               for (node_id, recipient) in &mut *message_recipients {
+                       msgs.insert(*node_id, recipient.release_pending_messages());
                }
                msgs
        }
 }
 
-fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<OnionMessage>>) -> bool {
+fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, OnionMessageRecipient>) -> bool {
        const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
        const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
        let mut total_buffered_bytes = 0;
        let mut peer_buffered_bytes = 0;
        for (pk, peer_buf) in buffer {
-               for om in peer_buf {
+               for om in peer_buf.pending_messages() {
                        let om_len = om.serialized_length();
                        if pk == peer_node_id {
                                peer_buffered_bytes += om_len;
@@ -618,6 +805,27 @@ fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, Ve
        false
 }
 
+impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref> EventsProvider
+for OnionMessenger<ES, NS, L, MR, OMH, CMH>
+where
+       ES::Target: EntropySource,
+       NS::Target: NodeSigner,
+       L::Target: Logger,
+       MR::Target: MessageRouter,
+       OMH::Target: OffersMessageHandler,
+       CMH::Target: CustomOnionMessageHandler,
+{
+       fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+               for (node_id, recipient) in self.message_recipients.lock().unwrap().iter_mut() {
+                       if let OnionMessageRecipient::PendingConnection(_, addresses, _) = recipient {
+                               if let Some(addresses) = addresses.take() {
+                                       handler.handle_event(Event::ConnectionNeeded { node_id: *node_id, addresses });
+                               }
+                       }
+               }
+       }
+}
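+
+// Because OnionMessenger now implements EventsProvider, the application can learn
+// which peers it should dial. A minimal sketch, where `connect_to_peer` is a
+// hypothetical application helper:
+//
+// ```rust
+// // Surface ConnectionNeeded events and dial the requested peers. A closure
+// // works as the handler since EventHandler is implemented for Fn(Event).
+// onion_messenger.process_pending_events(&|event| {
+//     if let Event::ConnectionNeeded { node_id, addresses } = event {
+//         // Hypothetical helper: open a transport to one of `addresses` and
+//         // run the peer handshake for `node_id`.
+//         connect_to_peer(node_id, addresses);
+//     }
+// });
+// ```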
+
 impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref> OnionMessageHandler
 for OnionMessenger<ES, NS, L, MR, OMH, CMH>
 where
@@ -660,24 +868,28 @@ where
                                }
                        },
                        Ok(PeeledOnion::Forward(next_node_id, onion_message)) => {
-                               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
-                               if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
+                               let mut message_recipients = self.message_recipients.lock().unwrap();
+                               if outbound_buffer_full(&next_node_id, &message_recipients) {
                                        log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
                                        return
                                }
 
                                #[cfg(fuzzing)]
-                               pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
-
-                               match pending_per_peer_msgs.entry(next_node_id) {
-                                       hash_map::Entry::Vacant(_) => {
+                               message_recipients
+                                       .entry(next_node_id)
+                                       .or_insert_with(|| OnionMessageRecipient::ConnectedPeer(VecDeque::new()));
+
+                               match message_recipients.entry(next_node_id) {
+                                       hash_map::Entry::Occupied(mut e) if matches!(
+                                               e.get(), OnionMessageRecipient::ConnectedPeer(..)
+                                       ) => {
+                                               e.get_mut().enqueue_message(onion_message);
+                                               log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
+                                       },
+                                       _ => {
                                                log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
                                                return
                                        },
-                                       hash_map::Entry::Occupied(mut e) => {
-                                               e.get_mut().push_back(onion_message);
-                                               log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
-                                       }
                                }
                        },
                        Err(e) => {
@@ -688,15 +900,42 @@ where
 
        fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init, _inbound: bool) -> Result<(), ()> {
                if init.features.supports_onion_messages() {
-                       let mut peers = self.pending_messages.lock().unwrap();
-                       peers.insert(their_node_id.clone(), VecDeque::new());
+                       self.message_recipients.lock().unwrap()
+                               .entry(*their_node_id)
+                               .or_insert_with(|| OnionMessageRecipient::ConnectedPeer(VecDeque::new()))
+                               .mark_connected();
+               } else {
+                       self.message_recipients.lock().unwrap().remove(their_node_id);
                }
+
                Ok(())
        }
 
        fn peer_disconnected(&self, their_node_id: &PublicKey) {
-               let mut pending_msgs = self.pending_messages.lock().unwrap();
-               pending_msgs.remove(their_node_id);
+               match self.message_recipients.lock().unwrap().remove(their_node_id) {
+                       Some(OnionMessageRecipient::ConnectedPeer(..)) => {},
+                       _ => debug_assert!(false),
+               }
+       }
+
+       fn timer_tick_occurred(&self) {
+               let mut message_recipients = self.message_recipients.lock().unwrap();
+
+               // Drop any pending recipients that have been awaiting a connection for MAX_TIMER_TICKS,
+               // to avoid retaining their buffered messages for too long.
+               message_recipients.retain(|_, recipient| match recipient {
+                       OnionMessageRecipient::PendingConnection(_, None, ticks) => *ticks < MAX_TIMER_TICKS,
+                       OnionMessageRecipient::PendingConnection(_, Some(_), _) => true,
+                       _ => true,
+               });
+
+               // Increment the timer tick count for the remaining pending recipients so that their
+               // buffered messages are dropped once it reaches MAX_TIMER_TICKS.
+               for recipient in message_recipients.values_mut() {
+                       if let OnionMessageRecipient::PendingConnection(_, None, ticks) = recipient {
+                               *ticks += 1;
+                       }
+               }
        }
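
Under this policy, a pending recipient is only dropped once its `ConnectionNeeded` addresses have been taken and `MAX_TIMER_TICKS` further ticks have elapsed. A test-style sketch (send arguments are placeholders; `MAX_TIMER_TICKS` is only visible within the module):

```rust
// Sketch: buffered messages for an unconnected node are eventually dropped.
messenger.send_onion_message(message, destination, None).unwrap();

// Surface (and ignore) the ConnectionNeeded event; dropping only starts once
// the addresses have been handed out.
messenger.process_pending_events(&|_| {});

// Retention is checked before the tick count is incremented, so it takes
// MAX_TIMER_TICKS + 1 ticks for the recipient entry to be removed.
for _ in 0..=MAX_TIMER_TICKS {
	messenger.timer_tick_occurred();
}
assert!(messenger.next_onion_message_for_peer(node_id).is_none());
```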
 
        fn provided_node_features(&self) -> NodeFeatures {
@@ -721,7 +960,7 @@ where
                        let PendingOnionMessage { contents, destination, reply_path } = message;
                        #[cfg(c_bindings)]
                        let (contents, destination, reply_path) = message;
-                       self.find_path_and_enqueue_onion_message(
+                       let _ = self.find_path_and_enqueue_onion_message(
                                contents, destination, reply_path, format_args!("when sending OffersMessage")
                        );
                }
@@ -732,16 +971,14 @@ where
                        let PendingOnionMessage { contents, destination, reply_path } = message;
                        #[cfg(c_bindings)]
                        let (contents, destination, reply_path) = message;
-                       self.find_path_and_enqueue_onion_message(
+                       let _ = self.find_path_and_enqueue_onion_message(
                                contents, destination, reply_path, format_args!("when sending CustomMessage")
                        );
                }
 
-               let mut pending_msgs = self.pending_messages.lock().unwrap();
-               if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
-                       return msgs.pop_front()
-               }
-               None
+               self.message_recipients.lock().unwrap()
+                       .get_mut(&peer_node_id)
+                       .and_then(|buffer| buffer.dequeue_message())
        }
 }
 
@@ -759,7 +996,7 @@ pub type SimpleArcOnionMessenger<M, T, F, L> = OnionMessenger<
        Arc<KeysManager>,
        Arc<KeysManager>,
        Arc<L>,
-       Arc<DefaultMessageRouter>,
+       Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>,
        Arc<SimpleArcChannelManager<M, T, F, L>>,
        IgnoringMessageHandler
 >;
@@ -778,7 +1015,7 @@ pub type SimpleRefOnionMessenger<
        &'a KeysManager,
        &'a KeysManager,
        &'b L,
-       &'i DefaultMessageRouter,
+       &'i DefaultMessageRouter<&'g NetworkGraph<&'b L>, &'b L>,
        &'j SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
        IgnoringMessageHandler
 >;